Remove tas()
[linux-2.6.git] include/asm-mips/system.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>


/*
 * switch_to(n) should switch tasks to task nr n, first checking
 * that n isn't the current task; if it is, it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)                                       \
do {                                                                    \
        if (cpu_has_fpu &&                                              \
            (prev->thread.mflags & MF_FPUBOUND) &&                      \
             (!(KSTK_STATUS(prev) & ST0_CU1))) {                        \
                prev->thread.mflags &= ~MF_FPUBOUND;                    \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        next->thread.emulated_fp = 0;                                   \
        (last) = resume(prev, next, next->thread_info);                 \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
} while(0)

#else
#define switch_to(prev,next,last)                                       \
do {                                                                    \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
} while(0)
#endif
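
/*
 * Illustrative sketch (not part of the original header): the scheduler's
 * context switch path is what expands switch_to(); roughly
 *
 *      switch_to(prev, next, prev);
 *
 * where the third argument ("last") is written with the task that was
 * running immediately before the current task got the CPU back, so the
 * caller can finish bookkeeping on behalf of that task after resume().
 */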

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     ll      %0, %3                  # xchg_u32      \n"
                "       .set    mips0                                   \n"
                "       move    %2, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}
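
/*
 * Illustrative pseudo-C (not part of the original header) for what the
 * ll/sc sequences above implement: the load-linked/store-conditional
 * pair retries until the swap completes without interference:
 *
 *      do {
 *              retval = *m;                            (ll: load-linked)
 *      } while (!store_conditional(m, val));           (sc fails if *m was disturbed)
 *
 * store_conditional() is only a stand-in name for the sc instruction.
 */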

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else if (cpu_has_llsc) {
                unsigned long dummy;

                __asm__ __volatile__(
                "       .set    mips3                                   \n"
                "1:     lld     %0, %3                  # xchg_u64      \n"
                "       move    %2, %z4                                 \n"
                "       scd     %2, %1                                  \n"
                "       beqz    %2, 2f                                  \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    mips0                                   \n"
                : "=&r" (retval), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                *m = val;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);
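
/*
 * Illustrative sketch (not part of the original header): __xchg() below
 * only handles 4- and 8-byte operands, so something like
 *
 *      unsigned short s;               (hypothetical 2-byte use)
 *      xchg(&s, 1);
 *
 * compiles, but leaves an unresolved call to
 * __xchg_called_with_bad_pointer() and the build fails at link time.
 */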

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
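
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * xchg() atomically stores the new value and returns the old one, which
 * suits take-and-clear style flags:
 *
 *      static unsigned long pending;
 *
 *      if (xchg(&pending, 0))          (atomically take and clear the flag)
 *              process_pending();      (hypothetical consumer)
 */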

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

static inline unsigned long __cmpxchg_u32_local(volatile int * m,
        unsigned long old, unsigned long new)
{
        __u32 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     ll      %0, %2                  # __cmpxchg_u32 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z4                                 \n"
                "       .set    mips3                                   \n"
                "       sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                local_irq_restore(flags);       /* implies memory barrier  */
        }

        return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
        unsigned long new)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 3f                                  \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "       .previous                                       \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                raw_local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }

        smp_mb();

        return retval;
}

static inline unsigned long __cmpxchg_u64_local(volatile int * m,
        unsigned long old, unsigned long new)
{
        __u64 retval;

        if (cpu_has_llsc && R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__(
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
                "       .set    mips3                                   \n"
                "1:     lld     %0, %2                  # __cmpxchg_u64 \n"
                "       bne     %0, %z3, 2f                             \n"
                "       move    $1, %z4                                 \n"
                "       scd     $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (retval), "=R" (*m)
                : "R" (*m), "Jr" (old), "Jr" (new)
                : "memory");
        } else {
                unsigned long flags;

                local_irq_save(flags);
                retval = *m;
                if (retval == old)
                        *m = new;
                local_irq_restore(flags);       /* implies memory barrier  */
        }

        return retval;
}

#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
        unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

static inline unsigned long __cmpxchg_local(volatile void * ptr,
        unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32_local(ptr, old, new);
        case 8:
                return __cmpxchg_u64_local(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,old,new) \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), \
                (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

#define cmpxchg_local(ptr,old,new) \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
                (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
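
/*
 * Usage sketch (illustrative only; the names below are hypothetical):
 * the classic compare-and-swap retry loop for a lock-free update:
 *
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 *
 * cmpxchg_local() does the same but is only atomic with respect to the
 * local CPU and omits the SMP barriers, which is cheaper where that
 * suffices.
 */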

extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */