/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i) { (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_op_acquire(op, args...) \
({ \
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
        __asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory"); \
        __ret; \
})

#define __atomic_op_release(op, args...) \
({ \
        __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory"); \
        op##_relaxed(args); \
})
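
/*
 * Illustrative sketch (not part of this header): the generic
 * linux/atomic.h layer is what builds the fully-ordered and
 * acquire/release variants out of the _relaxed ones defined below,
 * roughly as
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * so atomic_add_return_acquire(i, v) is the relaxed ll/sc loop
 * followed by PPC_ACQUIRE_BARRIER.
 */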

static __inline__ int atomic_read(const atomic_t *v)
{
        int t;

        __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

        return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

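/*
 * Illustrative usage (a minimal sketch, not part of this header):
 *
 *	atomic_t count = ATOMIC_INIT(0);
 *
 *	atomic_set(&count, 5);
 *	int n = atomic_read(&count);	(n is now 5)
 *
 * Both are a single plain load/store: atomic with respect to other
 * atomic_t accessors, but with no implied memory barrier.
 */
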
#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \
{ \
        int t; \
 \
        __asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
        #asm_op " %0,%2,%0\n" \
        PPC405_ERR77(0,%3) \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
        : "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
} \

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
        int t; \
 \
        __asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
        #asm_op " %0,%2,%0\n" \
        PPC405_ERR77(0, %3) \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
        : "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
 \
        return t; \
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
        int res, t; \
 \
        __asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
        #asm_op " %1,%3,%0\n" \
        PPC405_ERR77(0, %4) \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
        : "=&r" (res), "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
 \
        return res; \
}

#define ATOMIC_OPS(op, asm_op) \
        ATOMIC_OP(op, asm_op) \
        ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
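
/*
 * Illustrative expansion (a sketch, not literal preprocessor output):
 * ATOMIC_OPS(add, add) above generates three functions,
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * and the "subf" (subtract-from) mnemonic gives the matching sub
 * family, all built on the same lwarx/stwcx. retry loop.
 */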

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
        ATOMIC_OP(op, asm_op) \
        ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
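
/*
 * The inc/dec variants below use addic, which updates the carry bit
 * (CA) in the XER register, hence the "xer" clobber on these asm
 * blocks.
 */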

static __inline__ void atomic_inc(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc\n\
 addic %0,%0,1\n"
        PPC405_ERR77(0,%2)
" stwcx. %0,0,%2 \n\
 bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
" addic %0,%0,1\n"
        PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec\n\
 addic %0,%0,-1\n"
        PPC405_ERR77(0,%2)\
" stwcx. %0,0,%2\n\
 bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
" addic %0,%0,-1\n"
        PPC405_ERR77(0, %2)
" stwcx. %0,0,%2\n"
" bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
        cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
        cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
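
/*
 * Illustrative usage (a minimal sketch, not part of this header):
 * a classic cmpxchg retry loop, here capping a counter at some
 * hypothetical limit:
 *
 *	int old = atomic_read(&v);
 *	for (;;) {
 *		int new = old < limit ? old + 1 : old;
 *		int seen = atomic_cmpxchg(&v, old, new);
 *		if (seen == old)
 *			break;		(the swap happened)
 *		old = seen;		(lost a race; retry)
 *	}
 */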

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
 cmpw 0,%0,%3 \n\
 beq 2f \n\
 add %0,%2,%0 \n"
        PPC405_ERR77(0,%2)
" stwcx. %0,0,%1 \n\
 bne- 1b \n"
        PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
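
/*
 * Illustrative usage (a minimal sketch with a hypothetical refcounted
 * object, not part of this header):
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	(count was 0: object dying, no ref taken)
 *
 * The old value is returned, so comparing it with @u tells the caller
 * whether the add was actually performed.
 */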

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
        int t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
 cmpwi 0,%0,0\n\
 beq- 2f\n\
 addic %1,%0,1\n"
        PPC405_ERR77(0,%2)
" stwcx. %1,0,%2\n\
 bne- 1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
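
/*
 * Illustrative usage (a minimal sketch, not part of this header): the
 * lookup side of an RCU-protected refcount, where a count of zero
 * means the object is already being freed and must not be revived:
 *
 *	rcu_read_lock();
 *	obj = lookup(key);			(hypothetical lookup)
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;			(lost the race with free)
 *	rcu_read_unlock();
 */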

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
 cmpwi %0,1\n\
 addi %0,%0,-1\n\
 blt- 2f\n"
        PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
 bne- 1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:" : "=&b" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
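
/*
 * Illustrative usage (a minimal sketch with a hypothetical resource
 * pool, not part of this header): a semaphore-style trylock,
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	(was already 0, nothing was claimed)
 *
 * A negative return means no decrement took place; a return >= 0
 * means one unit was successfully taken.
 */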

#ifdef __powerpc64__

#define ATOMIC64_INIT(i) { (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
        long t;

        __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

        return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
        __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(long a, atomic64_t *v) \
{ \
        long t; \
 \
        __asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "\n" \
        #asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3 \n" \
" bne- 1b\n" \
        : "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline long \
atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
{ \
        long t; \
 \
        __asm__ __volatile__( \
"1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
        #asm_op " %0,%2,%0\n" \
" stdcx. %0,0,%3\n" \
" bne- 1b\n" \
        : "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
 \
        return t; \
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline long \
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
{ \
        long res, t; \
 \
        __asm__ __volatile__( \
"1: ldarx %0,0,%4 # atomic64_fetch_" #op "_relaxed\n" \
        #asm_op " %1,%3,%0\n" \
" stdcx. %1,0,%4\n" \
" bne- 1b\n" \
        : "=&r" (res), "=&r" (t), "+m" (v->counter) \
        : "r" (a), "r" (&v->counter) \
        : "cc"); \
 \
        return res; \
}

#define ATOMIC64_OPS(op, asm_op) \
        ATOMIC64_OP(op, asm_op) \
        ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
        ATOMIC64_OP(op, asm_op) \
        ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
        long t;

        __asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc\n\
 addic %0,%0,1\n\
 stdcx. %0,0,%2 \n\
 bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
        long t;

        __asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
" addic %0,%0,1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
        long t;

        __asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec\n\
 addic %0,%0,-1\n\
 stdcx. %0,0,%2\n\
 bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
        long t;

        __asm__ __volatile__(
"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
" addic %0,%0,-1\n"
" stdcx. %0,0,%2\n"
" bne- 1b"
        : "=&r" (t), "+m" (v->counter)
        : "r" (&v->counter)
        : "cc", "xer");

        return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
        long t;

        __asm__ __volatile__(
        PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
 addic. %0,%0,-1\n\
 blt- 2f\n\
 stdcx. %0,0,%1\n\
 bne- 1b"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:" : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
        cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
        cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long t;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%1 # atomic64_fetch_add_unless\n\
 cmpd 0,%0,%3 \n\
 beq 2f \n\
 add %0,%2,%0 \n"
" stdcx. %0,0,%1 \n\
 bne- 1b \n"
        PPC_ATOMIC_EXIT_BARRIER
" subf %0,%2,%0 \n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (a), "r" (u)
        : "cc", "memory");

        return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
        long t1, t2;

        __asm__ __volatile__ (
        PPC_ATOMIC_ENTRY_BARRIER
"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
 cmpdi 0,%0,0\n\
 beq- 2f\n\
 addic %1,%0,1\n\
 stdcx. %1,0,%2\n\
 bne- 1b\n"
        PPC_ATOMIC_EXIT_BARRIER
        "\n\
2:"
        : "=&r" (t1), "=&r" (t2)
        : "r" (&v->counter)
        : "cc", "xer", "memory");

        return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */