#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
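
/*
 * Example (illustrative, not part of the original header): ATOMIC_INIT
 * is for static initialisation, e.g.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 */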

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
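
/*
 * Note (comment added for clarity): the inline asm above guarantees
 * that the counter is read/written with a single aligned lwz/stw, so
 * the access cannot be split or widened by the compiler; the %U/%X
 * modifiers merely let gcc choose the update/indexed form of the same
 * instruction.
 */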

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
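
/*
 * For reference (comment added): each ATOMIC_OPS() line above expands
 * to three primitives; ATOMIC_OPS(add, add) yields
 *
 *	void atomic_add(int a, atomic_t *v);
 *	int atomic_add_return_relaxed(int a, atomic_t *v);
 *	int atomic_fetch_add_relaxed(int a, atomic_t *v);
 */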

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
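
/*
 * Usage sketch (illustrative; the wrapper lives in linux/atomic.h, not
 * in this file): the generic layer is assumed to build
 * atomic_add_unless() on top of this primitive, roughly as
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */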

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
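
/*
 * Example (illustrative, with a hypothetical obj->refcount): the usual
 * "take a reference only while the object is still live" pattern:
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	(refcount had already dropped to zero)
 */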

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
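
/*
 * Example (illustrative, hypothetical counter name): since the return
 * value is the old value minus one even when nothing was taken, a
 * negative result means the counter was already zero:
 *
 *	if (atomic_dec_if_positive(&available) < 0)
 *		return -EBUSY;
 */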

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
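
/*
 * As in the 32-bit block above (comment added): each ATOMIC64_OPS()
 * line expands to atomic64_<op>(), atomic64_<op>_return_relaxed() and
 * atomic64_fetch_<op>_relaxed(), using ldarx/stdcx. instead of
 * lwarx/stwcx.
 */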

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */