/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-405.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})

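/*
 * For context, a sketch (assumption: mainline kernels of roughly this
 * vintage) of how the generic layer consumes these helpers:
 * include/linux/atomic.h builds the _acquire/_release variants from the
 * _relaxed ones, along the lines of
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * so an acquire op is the relaxed op followed by PPC_ACQUIRE_BARRIER, and
 * a release op is PPC_RELEASE_BARRIER followed by the relaxed op.
 */
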
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

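/*
 * Note on the asm above: the "%U1%X1"/"%U0%X0" operand modifiers let GCC
 * emit the update-form ("u") or indexed-form ("x") variant of lwz/stw when
 * the addressing mode it chose for the "m" operand requires it.
 */
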
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
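
/*
 * Illustration: ATOMIC_OPS(add, add) above expands (roughly) to
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * each wrapping a lwarx/stwcx. loop: lwarx loads v->counter and sets a
 * reservation, stwcx. stores only if the reservation still holds, and
 * bne- retries if another CPU touched the cacheline in between.
 */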

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

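/*
 * addic updates the carry bit in XER, which is why the increment/decrement
 * asm blocks here list "xer" among their clobbers.
 */
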
static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)\
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

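/*
 * A typical caller-side pattern on top of atomic_cmpxchg() is an open-coded
 * read-modify-write loop; a minimal sketch (compute() is hypothetical):
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = compute(old);
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */
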
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}

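/*
 * Sketch (assumption: mainline of roughly this vintage): the generic
 * atomic_add_unless() in include/linux/atomic.h wraps the primitive above,
 * turning the returned old value into a boolean, roughly
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */
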
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
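
/*
 * Defining atomic_inc_not_zero() as itself signals to the generic headers
 * that this architecture supplies its own implementation, so the fallback
 * built on atomic_add_unless() is not used.
 */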

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
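
/*
 * Minimal usage sketch (obj and free_object() are hypothetical): drop a
 * reference only while the count is positive:
 *
 *	if (atomic_dec_if_positive(&obj->refs) == 0)
 *		free_object(obj);
 *
 * Remember the return value is the old value minus one even when the
 * counter was not actually decremented (see the comment above).
 */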

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was performed (@v was not @u),
 * and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

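/*
 * Note: unlike the 32-bit __atomic_add_unless() above, which returns the
 * old value, atomic64_add_unless() returns a boolean: non-zero if the
 * addition was performed, zero if @v already equalled @u.
 */
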
/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */