[AVR32] Clean up exception handling code
[linux-2.6.git] / include / asm-avr32 / system.h
1 /*
2  * Copyright (C) 2004-2006 Atmel Corporation
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  */
8 #ifndef __ASM_AVR32_SYSTEM_H
9 #define __ASM_AVR32_SYSTEM_H
10
11 #include <linux/compiler.h>
12 #include <linux/linkage.h>
13 #include <linux/types.h>
14
15 #include <asm/ptrace.h>
16 #include <asm/sysreg.h>
17
/*
 * Atomically exchange *ptr with x and return the previous value,
 * cast back to the pointee type.  Only 32-bit operands are
 * supported; other sizes fail at link time (see __xchg below).
 */
#define xchg(ptr,x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
20
/* Emit a single no-op instruction. */
#define nop() asm volatile("nop")

/*
 * Memory barriers.  mb() and rmb() are compiler-only barriers;
 * wmb() additionally issues "sync 0", presumably to flush the write
 * buffer before later stores -- confirm against the AVR32
 * architecture manual.
 */
#define mb()                    asm volatile("" : : : "memory")
#define rmb()                   mb()
#define wmb()                   asm volatile("sync 0" : : : "memory")
/* No barrier needed for data-dependent reads on this architecture. */
#define read_barrier_depends()  do { } while(0)
/* Store `value` into `var`, then execute a full barrier. */
#define set_mb(var, value)      do { var = value; mb(); } while(0)
28
29 /*
30  * Help PathFinder and other Nexus-compliant debuggers keep track of
31  * the current PID by emitting an Ownership Trace Message each time we
32  * switch task.
33  */
#ifdef CONFIG_OWNERSHIP_TRACE
#include <asm/ocd.h>
/*
 * Write first the outgoing then the incoming PID to the debug PID
 * register; each __mtdr() presumably makes the OCD unit emit one
 * Ownership Trace Message (see comment above).  NOTE(review): this
 * assumes `current` already refers to the incoming task when the
 * macro runs -- confirm against the context-switch path.
 */
#define finish_arch_switch(prev)                        \
        do {                                            \
                __mtdr(DBGREG_PID, prev->pid);          \
                __mtdr(DBGREG_PID, current->pid);       \
        } while(0)
#endif
42
43 /*
44  * switch_to(prev, next, last) should switch from task `prev' to task
45  * `next'. `prev' will never be the same as `next'.
46  *
47  * We just delegate everything to the __switch_to assembly function,
48  * which is implemented in arch/avr32/kernel/switch_to.S
49  *
50  * mb() tells GCC not to cache `current' across this call.
51  */
/* Opaque here; defined by <asm/processor.h> and <linux/sched.h>. */
struct cpu_context;
struct task_struct;
/*
 * Low-level context switch, implemented in assembly
 * (arch/avr32/kernel/switch_to.S).  Returns the task we switched
 * away from so the caller can report it via `last`.
 */
extern struct task_struct *__switch_to(struct task_struct *,
                                       struct cpu_context *,
                                       struct cpu_context *);
/*
 * NOTE(review): the second argument is one-past-the-end of prev's
 * cpu_context -- presumably the assembly saves registers downward
 * from that address; confirm against switch_to.S.
 */
#define switch_to(prev, next, last)                                     \
        do {                                                            \
                last = __switch_to(prev, &prev->thread.cpu_context + 1, \
                                   &next->thread.cpu_context);          \
        } while (0)
62
#ifdef CONFIG_SMP
# error "The AVR32 port does not support SMP"
#else
/*
 * Uniprocessor only: SMP barriers degrade to compiler barriers,
 * which is all that is needed to order accesses against interrupts.
 */
# define smp_mb()               barrier()
# define smp_rmb()              barrier()
# define smp_wmb()              barrier()
# define smp_read_barrier_depends() do { } while(0)
#endif
71
#include <linux/irqflags.h>

/*
 * Deliberately never defined: referencing it from __xchg()'s default
 * case turns an invalid xchg() size into a link-time error.
 */
extern void __xchg_called_with_bad_pointer(void);

#ifdef __CHECKER__
/* Prototype so sparse accepts the compiler builtin used below. */
extern unsigned long __builtin_xchg(void *ptr, unsigned long x);
#endif

/*
 * 32-bit exchange via a compiler builtin -- presumably lowered to the
 * AVR32 xchg instruction; confirm against the toolchain docs.
 */
#define xchg_u32(val, m) __builtin_xchg((void *)m, val)
81
/*
 * Size-dispatched exchange helper backing the xchg() macro.  Only
 * 4-byte operands are supported; any other size references the
 * never-defined __xchg_called_with_bad_pointer(), so the build
 * breaks at link time instead of silently misbehaving.
 */
static inline unsigned long __xchg(unsigned long x,
                                       volatile void *ptr,
                                       int size)
{
        if (size == 4)
                return xchg_u32(x, ptr);

        /* Unsupported operand size: force a link error. */
        __xchg_called_with_bad_pointer();
        return x;
}
94
/*
 * Atomic 32-bit compare-and-exchange: if *m equals `old`, store
 * `new` into *m.  Returns the value loaded from *m, so the caller
 * can compare it with `old` to tell whether the swap happened.
 *
 * NOTE(review): "ssrf 5" presumably sets the status-register L
 * (lock) flag and "stcond" only stores while that flag is still set
 * (cleared by interrupts/exceptions), retrying from 1: on failure --
 * confirm against the AVR32 architecture manual.
 */
static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
                                          unsigned long new)
{
        __u32 ret;

        asm volatile(
                "1:     ssrf    5\n"
                "       ld.w    %[ret], %[m]\n"
                "       cp.w    %[ret], %[old]\n"
                "       brne    2f\n"
                "       stcond  %[m], %[new]\n"
                "       brne    1b\n"
                "2:\n"
                : [ret] "=&r"(ret), [m] "=m"(*m)
                : "m"(m), [old] "ir"(old), [new] "r"(new)
                : "memory", "cc");
        return ret;
}
113
/*
 * Never defined: a 64-bit cmpxchg() on this 32-bit kernel fails at
 * link time, with the symbol name spelling out why.
 */
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
        volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

/* Advertise a native cmpxchg implementation to generic code. */
#define __HAVE_ARCH_CMPXCHG 1
123
/*
 * Size-dispatched compare-and-exchange backing the cmpxchg() macro.
 * 4-byte operands use the native implementation; 8-byte and invalid
 * sizes resolve to never-defined symbols so the build fails at link
 * time rather than at run time.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        if (size == 4)
                return __cmpxchg_u32(ptr, old, new);
        if (size == 8)
                return __cmpxchg_u64(ptr, old, new);

        /* Unsupported operand size: force a link error. */
        __cmpxchg_called_with_bad_pointer();
        return old;
}
137
/*
 * If *ptr == old, atomically set *ptr to new.  Returns the previous
 * value of *ptr, cast back to the pointee type; compare it with
 * `old` to tell whether the exchange took place.
 */
#define cmpxchg(ptr, old, new)                                  \
        ((typeof(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), \
                                   (unsigned long)(new),        \
                                   sizeof(*(ptr))))
142
struct pt_regs;
/* Fatal kernel error: print `str` and the register state, never return. */
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
/* Deliver signal `signr` (with siginfo code/addr) for a fault in `regs`. */
void _exception(long signr, struct pt_regs *regs, int code,
                unsigned long addr);

/* No stack randomization/alignment adjustment on this architecture. */
#define arch_align_stack(x)     (x)
149
150 #endif /* __ASM_AVR32_SYSTEM_H */