/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/lib/memset.S
 *
 *  Copyright (C) 1995-2000 Russell King
 *
 *  ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5

ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
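/*
 * Register usage in the body below, as set up by the entry sequence
 * above: r0 is kept untouched as the return value, ip walks the
 * output buffer, r1 holds the fill byte, r2 the byte count, and r3
 * the low two bits of the destination (non-zero means an unaligned
 * start, handled at label 6).
 */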
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
7:	cmp	r2, #16
	blt	4f

#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
	stmfd	sp!, {r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
	mov	r8, r1
	mov	lr, r3
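/*
 * r1, r3, r8 and lr now all hold the fill pattern, so each stmia
 * below stores 16 bytes and the four conditional stores in the loop
 * cover 64 bytes per iteration.
 */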

2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count; we're only testing bits from now on
 */
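/*
 * (The loop above leaves r2 64 too low, but only bits 0-5 of r2 are
 *  examined from here on, and those are unaffected by the extra
 *  subtraction of 64.)
 */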
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

	stmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )
UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
	mov	r4, r1
	mov	r5, r3
	mov	r6, r1
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3
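/*
 * r1 and r3-r8 plus lr now all hold the fill pattern, so a single
 * eight-register stmia below writes 32 bytes - a whole cache line,
 * per the comment above.
 */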

	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
	stmiami	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4
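/*
 * The block above runs only for sets of more than 96 bytes with ip
 * not 32-byte aligned: r8 = bytes up to the next 32-byte boundary,
 * and shifting it left by 28 moves its bit 4 into C, bit 3 into N and
 * bit 2 into bit 30, so the conditional 16-, 8- and 4-byte stores
 * consume exactly that many bytes before the cache-line loop.
 */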

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}

	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif

UNWIND( .fnstart )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set.  We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

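/*
 * Unaligned start: store 1-3 bytes until ip reaches a word boundary,
 * put the count back to what is genuinely left (r2 = r2 - (4 - r3)),
 * then rejoin the aligned code at label 1.  If fewer than 4 bytes
 * were requested in the first place, fall back to the byte stores
 * at label 5.
 */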
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)

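/*
 * __memset32 and __memset64 reuse the word-aligned body of memset:
 * the fill pattern arrives already replicated in r1 (and r3), so they
 * only need to set up ip and branch back to label 7 above.
 */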
ENTRY(__memset32)
UNWIND( .fnstart )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)