/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
   If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
   it 8 byte aligned.  Thus, we can do a little read-ahead, without
   dereferencing a cache line that we should not touch.
   Note that short and long instructions have been scheduled to avoid
   branch stalls.
   The beq_s to r3z could be made unaligned & long to avoid a stall
   there, but it is not likely to be taken often, and it
   would also be likely to cost an unaligned mispredict at the next call.  */

#include <linux/linkage.h>
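
/*
 * Illustration only, not part of the original file: a minimal C sketch of
 * the word-at-a-time NUL test that the loop below performs with
 * r8 = 0x01010101 and r12 = ror(r8) = 0x80808080.  The expression is
 * non-zero exactly when at least one byte of `word` is zero.  The helper
 * name is made up for this sketch.
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool word_has_nul(uint32_t word)	// illustrative helper only
 *	{
 *		return ((word - 0x01010101u) & ~word & 0x80808080u) != 0;
 *	}
 */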

ENTRY_CFI(strcpy)
	or	r2,r0,r1
	bmsk_s	r2,r2,1			; low 2 bits of (dst | src)
	brne.d	r2,0,charloop		; not 32-bit aligned: copy byte-wise
	mov_s	r10,r0			; r10 walks dst; r0 is kept as return value
	ld_s	r3,[r1,0]
	mov	r8,0x01010101
	bbit0.d	r1,2,loop_start		; src 8-byte aligned: enter loop directly
	ror	r12,r8			; r12 = 0x80808080
	sub	r2,r3,r8		; src is 4- but not 8-byte aligned:
	bic_s	r2,r2,r3		; check the word already loaded
	tst_s	r2,r12
	bne	r3z			; it contains a NUL: finish byte-wise
	mov_s	r4,r3
	.balign 4
loop:
	ld.a	r3,[r1,4]
	st.ab	r4,[r10,4]
loop_start:
	ld.a	r4,[r1,4]		; read ahead the next word
	sub	r2,r3,r8		; (r3 - 0x01010101) & ~r3 & 0x80808080
	bic_s	r2,r2,r3		; is non-zero iff r3 has a NUL byte
	tst_s	r2,r12
	bne_s	r3z
	st.ab	r3,[r10,4]
	sub	r2,r4,r8		; same test on the word read ahead
	bic	r2,r2,r4
	tst	r2,r12
	beq	loop
	mov_s	r3,r4
#ifdef __LITTLE_ENDIAN__
r3z:	bmsk.f	r1,r3,7			; peel off the lowest byte, setting flags
	lsr_s	r3,r3,8
#else
r3z:	lsr.f	r1,r3,24		; peel off the highest byte, setting flags
	asl_s	r3,r3,8
#endif
	bne.d	r3z
	stb.ab	r1,[r10,1]		; delay slot: the final NUL is stored too
	j_s	[blink]
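
/*
 * Rough C equivalent of the r3z tail above, shown for the little-endian
 * case only (a sketch for illustration; the helper name is made up): the
 * word is known to contain a NUL, so its bytes are stored one at a time,
 * including the terminating NUL, which the delay-slot stb.ab above also
 * stores on the final, not-taken iteration.
 *
 *	#include <stdint.h>
 *
 *	static char *copy_tail(char *dst, uint32_t word)	// illustrative only
 *	{
 *		unsigned char c;
 *
 *		do {
 *			c = word & 0xff;	// lowest byte first on little endian
 *			word >>= 8;
 *			*dst++ = c;
 *		} while (c != 0);
 *		return dst;
 *	}
 */
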
	.balign 4
charloop:
	ldb.ab	r3,[r1,1]


	brne.d	r3,0,charloop
	stb.ab	r3,[r10,1]
	j	[blink]
END_CFI(strcpy)