/* arch/cris/arch-v32/lib/string.c — optimized memcpy() for the CRIS v32 architecture. */
/*#************************************************************************#*/
/*#-------------------------------------------------------------------------*/
/*#                                                                         */
/*# FUNCTION NAME: memcpy()                                                 */
/*#                                                                         */
/*# PARAMETERS:  void* dst;   Destination address.                          */
/*#              void* src;   Source address.                               */
/*#              int   len;   Number of bytes to copy.                      */
/*#                                                                         */
/*# RETURNS:     dst.                                                       */
/*#                                                                         */
/*# DESCRIPTION: Copies len bytes of memory from src to dst.  No guarantees */
/*#              about copying of overlapping memory areas. This routine is */
/*#              very sensitive to compiler changes in register allocation. */
/*#              Should really be rewritten to avoid this problem.          */
/*#                                                                         */
/*#-------------------------------------------------------------------------*/
/*#                                                                         */
/*# HISTORY                                                                 */
/*#                                                                         */
/*# DATE      NAME            CHANGES                                       */
/*# ----      ----            -------                                       */
/*# 941007    Kenny R         Creation                                      */
/*# 941011    Kenny R         Lots of optimizations and inlining.           */
/*# 941129    Ulf A           Adapted for use in libc.                      */
/*# 950216    HP              N==0 forgotten if non-aligned src/dst.        */
/*#                           Added some optimizations.                     */
/*# 001025    HP              Make src and dst char *.  Align dst to        */
/*#                           dword, not just word-if-both-src-and-dst-     */
/*#                           are-misaligned.                               */
/*#                                                                         */
/*#-------------------------------------------------------------------------*/
33
34 #include <linux/types.h>
35
/*
 * memcpy - copy pn bytes from psrc to pdst (no overlap guarantees).
 *
 * Returns pdst.  Large blocks (>= 44 bytes) are copied with the CRIS
 * 'movem' instruction in inline assembly; the tail is copied with dword/
 * word/byte stores.  The parameters are pinned to specific registers
 * (r10/r11/r12/r13) so that the asm block below sees them where it
 * expects; the .ifnc directive in the asm verifies that GCC honored
 * the register declarations and errors out at assembly time otherwise.
 *
 * NOTE(review): n is an int while pn is size_t, so copies of >= 2 GiB
 * would misbehave — kept as-is since callers in-kernel never pass such
 * sizes; confirm before reusing elsewhere.
 */
void *memcpy(void *pdst,
             const void *psrc,
             size_t pn)
{
  /* We want the parameters in specific registers so the asm block can
     rely on them.  As it is now: r10 -> r13; r11 -> r11 (nop);
     r12 -> r12 (nop).  If gcc does the right thing here, it needs no
     temporaries and no stack space to save anything.  */

  register void *return_dst __asm__ ("r10") = pdst;
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;

  /* Align dst to a dword boundary.  When src is aligned but dst is not,
     this wastes a few cycles; checking whether the re-alignment was
     unnecessary would cost about as much.  */
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes; that way
         we don't have to check further for overflows.  */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      n--;
      *dst = *src;
      src++;
      dst++;
    }

    if ((unsigned long) dst & 2)
    {
      n -= 2;
      *(short *) dst = *(const short *) src;
      src += 2;
      dst += 2;
    }
  }

  /* Decide which copying method to use.  Movem is dirt cheap, so the
     overhead is low enough to always use the minimum block size as the
     threshold.  */
  if (n >= 44)
  {
    /* For large copies we use 'movem'.  */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that would move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.  The asm saves and restores r0..r10 on the stack
       itself instead.  */
    __asm__ volatile ("                                                 \n\
        ;; Check that the register asm declaration got right.           \n\
        ;; The GCC manual explicitly says TRT will happen.              \n\
        .ifnc %0-%1-%2,$r13-$r11-$r12                                   \n\
        .err                                                            \n\
        .endif                                                          \n\
                                                                        \n\
        ;; Save the registers we'll use in the movem process            \n\
                                                                        \n\
        ;; on the stack.                                                \n\
        subq    11*4,$sp                                                \n\
        movem   $r10,[$sp]                                              \n\
                                                                        \n\
        ;; Now we've got this:                                          \n\
        ;; r11 - src                                                    \n\
        ;; r13 - dst                                                    \n\
        ;; r12 - n                                                      \n\
                                                                        \n\
        ;; Update n for the first loop                                  \n\
        subq    44,$r12                                                 \n\
0:                                                                      \n\
        movem   [$r11+],$r10                                            \n\
        subq   44,$r12                                                  \n\
        bge     0b                                                      \n\
        movem   $r10,[$r13+]                                            \n\
                                                                        \n\
        addq   44,$r12  ;; compensate for last loop underflowing n      \n\
                                                                        \n\
        ;; Restore registers from stack                                 \n\
        movem [$sp+],$r10"

     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n)
     /* Inputs */ : "0" (dst), "1" (src), "2" (n));

  }

  /* Either we directly start copying here, using dword copying in a
     loop, or we copy as much as possible with 'movem' and then the last
     block (<44 bytes) is copied here.  This works because 'movem' has
     updated src, dst and n.

     The stores below are written as assignment-then-advance rather than
     the historical GCC cast-as-lvalue form (*((long *) dst)++ = ...),
     which was removed in GCC 4.0.  */

  while (n >= 16)
  {
    *(long *) dst = *(const long *) src; dst += 4; src += 4;
    *(long *) dst = *(const long *) src; dst += 4; src += 4;
    *(long *) dst = *(const long *) src; dst += 4; src += 4;
    *(long *) dst = *(const long *) src; dst += 4; src += 4;
    n -= 16;
  }

  /* A switch() is definitely the fastest way to dispatch on the
     remaining 0..15 bytes, although it takes a LOT of code.  */
  switch (n)
  {
    case 0:
      break;
    case 1:
      *dst = *src;
      break;
    case 2:
      *(short *) dst = *(const short *) src;
      break;
    case 3:
      *(short *) dst = *(const short *) src; dst += 2; src += 2;
      *dst = *src;
      break;
    case 4:
      *(long *) dst = *(const long *) src;
      break;
    case 5:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *dst = *src;
      break;
    case 6:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src;
      break;
    case 7:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src; dst += 2; src += 2;
      *dst = *src;
      break;
    case 8:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src;
      break;
    case 9:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *dst = *src;
      break;
    case 10:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src;
      break;
    case 11:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src; dst += 2; src += 2;
      *dst = *src;
      break;
    case 12:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src;
      break;
    case 13:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *dst = *src;
      break;
    case 14:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src;
      break;
    case 15:
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(long *) dst = *(const long *) src; dst += 4; src += 4;
      *(short *) dst = *(const short *) src; dst += 2; src += 2;
      *dst = *src;
      break;
  }

  return return_dst; /* destination pointer. */
} /* memcpy() */