/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
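
/*
 * Illustrative note (a sketch, not part of this file): the generic layer
 * in <linux/atomic.h> consumes the two hooks above to build the ordered
 * variants out of the _relaxed ops defined below, roughly as:
 *
 *	#define atomic_add_return_acquire(...)				\
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *	#define atomic_add_return_release(...)				\
 *		__atomic_op_release(atomic_add_return, __VA_ARGS__)
 */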

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
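
/*
 * Usage sketch (illustrative only; "counter" is a hypothetical variable):
 * plain loads/stores go through atomic_read()/atomic_set(), while any
 * concurrent read-modify-write must use the lwarx/stwcx. ops below.
 *
 *	static atomic_t counter = ATOMIC_INIT(0);
 *
 *	atomic_set(&counter, 10);
 *	pr_info("counter=%d\n", atomic_read(&counter));
 */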

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
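
/*
 * For reference: each ATOMIC_OPS() invocation above expands to three
 * functions built around the same lwarx/stwcx. retry loop, e.g.
 * ATOMIC_OPS(add, add) generates atomic_add(), atomic_add_return_relaxed()
 * and atomic_fetch_add_relaxed().
 */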

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
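
/*
 * Illustrative sketch (atomic_max() is hypothetical, not defined here):
 * operations without a dedicated implementation can be composed from
 * atomic_cmpxchg() with the usual read/compare/retry loop:
 *
 *	static inline void atomic_max(atomic_t *v, int n)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < n) {
 *			int tmp = atomic_cmpxchg(v, old, n);
 *			if (tmp == old)
 *				break;
 *			old = tmp;
 *		}
 *	}
 */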

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# __atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
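
/*
 * For reference (a sketch of what <linux/atomic.h> provides on top of the
 * helper above): the bool-style atomic_add_unless() is derived roughly as
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */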

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
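
/*
 * Usage sketch (illustrative; "obj" and "refcnt" are hypothetical): the
 * classic lookup-then-take-a-reference pattern, where an object whose
 * refcount has already hit zero must not be resurrected:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already being torn down)
 */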

#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
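
/*
 * Usage sketch (illustrative; "sem" is hypothetical): since the return
 * value is the old value minus one even when no decrement happened,
 * callers test the sign of the result:
 *
 *	if (atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;	(count was already zero)
 */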

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */