arch/microblaze/include/asm/uaccess.h
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/string.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * On Microblaze the fs value is actually the top of the corresponding
 * address space.
 *
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * For a non-MMU arch like Microblaze, KERNEL_DS and USER_DS are equal.
 */
# define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#  ifndef CONFIG_MMU
#  define KERNEL_DS	MAKE_MM_SEG(0)
#  define USER_DS	KERNEL_DS
#  else
#  define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#  define USER_DS	MAKE_MM_SEG(TASK_SIZE - 1)
#  endif

# define get_ds()	(KERNEL_DS)
# define get_fs()	(current_thread_info()->addr_limit)
# define set_fs(val)	(current_thread_info()->addr_limit = (val))

# define segment_eq(a, b)	((a).seg == (b).seg)

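/*
 * Illustrative sketch (not part of the original header), assuming the usual
 * kernel idiom of this era: temporarily widen the address limit so a
 * kernel-space buffer passes the user-access checks, then restore it.
 * The function name and arguments are hypothetical; vfs_read() is only
 * used as an example of a uaccess-checked callee.
 */
#if 0
static ssize_t kernel_read_example(struct file *file, void *buf,
				   size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* bypass the USER_DS limit check */
	ret = vfs_read(file, (__force char __user *)buf, count, pos);
	set_fs(old_fs);		/* always restore the previous limit */

	return ret;
}
#endif
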
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

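/*
 * Hedged sketch (not part of the original header) of how a fault handler is
 * expected to consult this table: when an instruction recorded in __ex_table
 * faults, the saved PC is replaced by the fixup address and execution
 * continues there instead of treating it as a fatal kernel fault.  The
 * surrounding handler code and the "regs" variable are hypothetical.
 */
#if 0
	unsigned long fixup = search_exception_table(regs->pc);

	if (fixup) {
		regs->pc = fixup;	/* resume at the out-of-line fixup code */
		return;
	}
	/* no fixup entry - this is a genuine kernel fault */
#endif
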
#define __clear_user(addr, n)	(memset((void *)(addr), 0, (n)), 0)

#ifndef CONFIG_MMU

/* Check against bounds of physical memory */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
	return ((addr < memory_start) ||
		((addr + size) > memory_end));
}

#define __range_ok(addr, size) \
		___range_ok((unsigned long)(addr), (unsigned long)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size) (__range_ok((addr), (size)) == 0)

/* Undefined function to trigger linker error */
extern int bad_user_access_length(void);

/* FIXME: this macro is a candidate for optimization -> memcpy */
#define __get_user(var, ptr)				\
({							\
	int __gu_err = 0;				\
	switch (sizeof(*(ptr))) {			\
	case 1:						\
	case 2:						\
	case 4:						\
		(var) = *(ptr);				\
		break;					\
	case 8:						\
		memcpy((void *) &(var), (ptr), 8);	\
		break;					\
	default:					\
		(var) = 0;				\
		__gu_err = __get_user_bad();		\
		break;					\
	}						\
	__gu_err;					\
})

#define __get_user_bad()	(bad_user_access_length(), (-EFAULT))

/* FIXME: __pu_val is only defined for the 8-byte case */
#define __put_user(var, ptr)					\
({								\
	int __pu_err = 0;					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
		*(ptr) = (var);					\
		break;						\
	case 8: {						\
		typeof(*(ptr)) __pu_val = (var);		\
		memcpy(ptr, &__pu_val, sizeof(__pu_val));	\
		}						\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

#define __put_user_bad()	(bad_user_access_length(), (-EFAULT))

#define put_user(x, ptr)	__put_user((x), (ptr))
#define get_user(x, ptr)	__get_user((x), (ptr))

#define copy_to_user(to, from, n)	(memcpy((to), (from), (n)), 0)
#define copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0)

#define __copy_to_user(to, from, n)	(copy_to_user((to), (from), (n)))
#define __copy_from_user(to, from, n)	(copy_from_user((to), (from), (n)))
#define __copy_to_user_inatomic(to, from, n) \
			(__copy_to_user((to), (from), (n)))
#define __copy_from_user_inatomic(to, from, n) \
			(__copy_from_user((to), (from), (n)))

static inline unsigned long clear_user(void *addr, unsigned long size)
{
	if (access_ok(VERIFY_WRITE, addr, size))
		size = __clear_user(addr, size);
	return size;
}

/* Returns 0 if the exception is not found and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long);

extern long strncpy_from_user(char *dst, const char *src, long count);
extern long strnlen_user(const char *src, long count);

#else /* CONFIG_MMU */

/*
 * Address is valid if:
 *  - "addr", "addr + size" and "size" are all below the limit
 * (checking "size" as well guards against wrap-around of "addr + size").
 */
#define access_ok(type, addr, size) \
	(get_fs().seg > (((unsigned long)(addr)) | \
		(size) | ((unsigned long)(addr) + (size))))

/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
 type?"WRITE":"READ",addr,size,get_fs().seg)) */

/*
 * All the __XXX versions of the macros/functions below do not perform
 * access checking. It is assumed that the necessary checks have already
 * been performed before the function (macro) is called.
 */

#define get_user(x, ptr)						\
({									\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))			\
		? __get_user((x), (ptr)) : -EFAULT;			\
})

#define put_user(x, ptr)						\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr)))			\
		? __put_user((x), (ptr)) : -EFAULT;			\
})

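/*
 * Illustrative sketch (not part of the original header): typical use of the
 * checked get_user()/put_user() from something like an ioctl handler.  The
 * function and argument names are hypothetical; only the 0/-EFAULT return
 * convention comes from the macros above.
 */
#if 0
static int frob_value_example(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* non-zero means the access failed */
		return -EFAULT;

	val *= 2;

	if (put_user(val, uptr))
		return -EFAULT;

	return 0;
}
#endif
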
#define __get_user(x, ptr)						\
({									\
	unsigned long __gu_val;						\
	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
	long __gu_err;							\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__get_user_asm("lhu", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__get_user_asm("lw", (ptr), __gu_val, __gu_err);	\
		break;							\
	default:							\
		__gu_val = 0; __gu_err = -EINVAL;			\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err)		\
({									\
	__asm__ __volatile__ (						\
			"1:"	insn	" %1, %2, r0;			\
				addk	%0, r0, r0;			\
			2:						\
			.section .fixup,\"ax\";				\
			3:	brid	2b;				\
				addik	%0, r0, %3;			\
			.previous;					\
			.section __ex_table,\"a\";			\
			.word	1b,3b;					\
			.previous;"					\
		: "=r"(__gu_err), "=r"(__gu_val)			\
		: "r"(__gu_ptr), "i"(-EFAULT)				\
	);								\
})

#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) volatile __gu_val = (x);			\
	long __gu_err = 0;						\
	switch (sizeof(__gu_val)) {					\
	case 1:								\
		__put_user_asm("sb", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 2:								\
		__put_user_asm("sh", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 4:								\
		__put_user_asm("sw", (ptr), __gu_val, __gu_err);	\
		break;							\
	case 8:								\
		__put_user_asm_8((ptr), __gu_val, __gu_err);		\
		break;							\
	default:							\
		__gu_err = -EINVAL;					\
	}								\
	__gu_err;							\
})

#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err)	\
({							\
__asm__ __volatile__ ("	lwi	%0, %1, 0;		\
		1:	swi	%0, %2, 0;		\
			lwi	%0, %1, 4;		\
		2:	swi	%0, %2, 4;		\
			addk	%0, r0, r0;		\
		3:					\
		.section .fixup,\"ax\";			\
		4:	brid	3b;			\
			addik	%0, r0, %3;		\
		.previous;				\
		.section __ex_table,\"a\";		\
		.word	1b,4b,2b,4b;			\
		.previous;"				\
	: "=&r"(__gu_err)				\
	: "r"(&__gu_val),				\
	"r"(__gu_ptr), "i"(-EFAULT)			\
	);						\
})

#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err)	\
({								\
	__asm__ __volatile__ (					\
			"1:"	insn	" %1, %2, r0;		\
				addk	%0, r0, r0;		\
			2:					\
			.section .fixup,\"ax\";			\
			3:	brid	2b;			\
				addik	%0, r0, %3;		\
			.previous;				\
			.section __ex_table,\"a\";		\
			.word	1b,3b;				\
			.previous;"				\
		: "=r"(__gu_err)				\
		: "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT)	\
	);							\
})

/*
 * Return: number of bytes not cleared, i.e. 0 on success or non-zero on failure.
 */
static inline int clear_user(char *to, int size)
{
	if (size && access_ok(VERIFY_WRITE, to, size)) {
		__asm__ __volatile__ ("				\
				1:				\
					sb	r0, %2, r0;	\
					addik	%0, %0, -1;	\
					bneid	%0, 1b;		\
					addik	%2, %2, 1;	\
				2:				\
				.section __ex_table,\"a\";	\
				.word	1b,2b;			\
				.section .text;"		\
			: "=r"(size)				\
			: "0"(size), "r"(to)
		);
	}
	return size;
}

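/*
 * Illustrative sketch (not part of the original header): clear_user()
 * returns the number of bytes it could not zero, so callers normally treat
 * any non-zero result as -EFAULT.  The "ubuf" and "len" names are
 * hypothetical.
 */
#if 0
	if (clear_user(ubuf, len))
		return -EFAULT;
#endif
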
#define __copy_from_user(to, from, n)	copy_from_user((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
		copy_from_user((to), (from), (n))

#define copy_to_user(to, from, n)					\
	(access_ok(VERIFY_WRITE, (to), (n)) ?				\
		__copy_tofrom_user((void __user *)(to),			\
			(__force const void __user *)(from), (n))	\
		: -EFAULT)

#define __copy_to_user(to, from, n)	copy_to_user((to), (from), (n))
#define __copy_to_user_inatomic(to, from, n)	copy_to_user((to), (from), (n))

#define copy_from_user(to, from, n)					\
	(access_ok(VERIFY_READ, (from), (n)) ?				\
		__copy_tofrom_user((__force void __user *)(to),		\
			(void __user *)(from), (n))			\
		: -EFAULT)

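/*
 * Illustrative sketch (not part of the original header): copying a structure
 * from user space and treating any non-zero return (either -EFAULT from a
 * failed access_ok() or a residual byte count from __copy_tofrom_user()) as
 * a fault.  The structure and argument names are hypothetical.
 */
#if 0
static int get_args_example(struct example_args *kargs,
			    const struct example_args __user *uargs)
{
	if (copy_from_user(kargs, uargs, sizeof(*kargs)))
		return -EFAULT;
	return 0;
}
#endif
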
extern int __strncpy_user(char *to, const char __user *from, int len);
extern int __strnlen_user(const char __user *sstr, int len);

#define strncpy_from_user(to, from, len)	\
		(access_ok(VERIFY_READ, from, 1) ?	\
			__strncpy_user(to, from, len) : -EFAULT)
#define strnlen_user(str, len)	\
		(access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)

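/*
 * Illustrative sketch (not part of the original header): fetching a
 * NUL-terminated string from user space.  A negative return means the
 * access failed.  The "uname" argument and buffer size are hypothetical.
 */
#if 0
	char name[32];
	long len = strncpy_from_user(name, uname, sizeof(name));

	if (len < 0)
		return len;	/* -EFAULT from the failed access_ok() */
#endif
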
#endif /* CONFIG_MMU */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_MICROBLAZE_UACCESS_H */