#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

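/*
 * Note: the volatile casts in atomic*_read() force the compiler to
 * emit a real load on every use (much like ACCESS_ONCE()) instead of
 * caching the counter in a register.
 */
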
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}
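
/*
 * The ia64_atomic*_add()/_sub() helpers all use the same
 * compare-and-swap retry loop: read the counter, compute the new
 * value, and attempt to publish it with an acquire-semantics
 * cmpxchg.  If another CPU changed the counter in the meantime,
 * ia64_cmpxchg() returns something other than "old" and the loop
 * retries with the fresh value.
 */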

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

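/*
 * atomic_cmpxchg()/atomic_xchg() simply forward to the generic
 * cmpxchg()/xchg() on the embedded counter.  An illustrative
 * one-shot-flag use (names made up for the example):
 *
 *	static atomic_t initialized = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&initialized, 0, 1) == 0)
 *		do_one_time_init();
 */
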
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
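
/*
 * Note that __atomic_add_unless() returns the *old* value of the
 * counter; the generic atomic_add_unless() wrapper in
 * <linux/atomic.h> converts that into a boolean.  The 64-bit
 * variant below returns the boolean directly.
 */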

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

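/*
 * The ia64 fetchadd instruction only accepts the immediate values
 * -16, -8, -4, -1, 1, 4, 8 and 16.  The *_return macros below
 * therefore use the single-instruction ia64_fetch_and_add() fast
 * path only when the increment is a compile-time constant from that
 * set, and fall back to the cmpxchg loops above otherwise.
 */
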
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})
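
/*
 * The inc/dec helpers below add or subtract the compile-time
 * constant 1, so they always qualify for the fetchadd fast path
 * in the *_return macros above.
 */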

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))
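
/*
 * Typical (illustrative) refcounting with the ops above, using
 * made-up names "refcnt" and "obj":
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);
 *	...
 *	if (atomic_dec_and_test(&refcnt))
 *		kfree(obj);
 */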

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */