/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * The *_return_relaxed and {cmp}xchg_relaxed variants are implemented
 * with a "bne-" instruction at the end, so an isync is enough as an
 * acquire barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

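/*
 * For reference, the generic atomic layer (include/linux/atomic.h)
 * builds the acquire/release variants out of the _relaxed ones using
 * the fences above, roughly as follows (a sketch, not the exact
 * generic code):
 *
 *	#define __atomic_op_acquire(op, args...)			\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
 *		__atomic_acquire_fence();				\
 *		__ret;							\
 *	})
 */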
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
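
/*
 * To illustrate, ATOMIC_OPS(add, add) above expands (roughly, with the
 * PPC405_ERR77 workaround and __inline__ elided) to:
 *
 *	static void atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add	%0,%2,%0\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "r" (a), "r" (&v->counter)
 *		: "cc");
 *	}
 *
 * plus atomic_add_return_relaxed() and atomic_fetch_add_relaxed()
 * built around the same lwarx/stwcx. retry loop.
 */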

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
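
/*
 * Typical use of the cmpxchg wrappers above is an open-coded
 * read-modify-write retry loop; a minimal sketch (the function name
 * is only for illustration):
 *
 *	static int atomic_clamped_inc(atomic_t *v, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= max)
 *				return old;
 *			new = old + 1;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */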

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
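
/*
 * A common caller pattern, shown here only as a sketch: take a
 * reference unless the count has already dropped to zero.
 *
 *	static bool obj_tryget(atomic_t *refcount)
 *	{
 *		return atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */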

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
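
/*
 * atomic_inc_not_zero() is the usual building block for lookups that
 * must not resurrect an object whose refcount has already hit zero; a
 * sketch with made-up names:
 *
 *	obj = lookup_table(key);
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;	// being freed, treat as a miss
 */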

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:" : "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
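
/*
 * Because the return value is old - 1 even when no decrement happened,
 * callers test for < 0; a minimal sketch of a semaphore-style trydown
 * (the name is illustrative only):
 *
 *	static bool sem_trydown(atomic_t *count)
 *	{
 *		return atomic_dec_if_positive(count) >= 0;
 *	}
 */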

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:" : "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */