/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-405.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

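/*
 * Illustrative sketch (our paraphrase of the generic layer, not code in
 * this file): include/linux/atomic.h composes the acquire variants from
 * the _relaxed ones using the fences above, roughly:
 *
 *	#define __atomic_op_acquire(op, args...)			\
 *	({								\
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	\
 *		__atomic_acquire_fence();				\
 *		__ret;							\
 *	})
 */
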
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

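/*
 * The inline asm above pins each access to a single lwz/stw, so these
 * behave comparably to READ_ONCE()/WRITE_ONCE(): no tearing, no compiler
 * caching. A minimal usage sketch (hypothetical counter):
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	atomic_set(&hits, 0);
 *	pr_info("hits=%d\n", atomic_read(&hits));
 */
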
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}									\

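/*
 * For reference, ATOMIC_OP(add, add) expands to roughly the following
 * (PPC405_ERR77 omitted for clarity):
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add	%0,%2,%0\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "r" (a), "r" (&v->counter)
 *		: "cc");
 *	}
 *
 * lwarx/stwcx. are PowerPC's load-reserve/store-conditional pair: if any
 * other agent writes the reservation granule between the two, stwcx.
 * fails (CR0.EQ clear) and "bne- 1b" retries the read-modify-write.
 */
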
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

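/*
 * The two return flavours differ only in which value they hand back.
 * Starting each call from v->counter == 5 (hypothetical):
 *
 *	atomic_add_return_relaxed(3, &v)	// returns 8, the new value
 *	atomic_fetch_add_relaxed(3, &v)		// returns 5, the old value
 *
 * Either call leaves v->counter == 8.
 */
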
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

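/*
 * The bitwise ops suit lock-free flag words; a sketch with a hypothetical
 * flag bit and helper:
 *
 *	#define F_DIRTY	0x2
 *
 *	int old = atomic_fetch_or_relaxed(F_DIRTY, &state);
 *	if (!(old & F_DIRTY))
 *		queue_writeback();	// we set it first; others back off
 */
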
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

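/*
 * atomic_cmpxchg() returns the value that was actually in *v; the store
 * happened iff that equals the expected old value. The usual retry loop,
 * sketched with a hypothetical LIMIT (clamped increment):
 *
 *	int old = atomic_read(&v);
 *	for (;;) {
 *		int new = min(old + 1, LIMIT);
 *		int seen = atomic_cmpxchg(&v, old, new);
 *		if (seen == old)
 *			break;		// exchange succeeded
 *		old = seen;		// raced; retry with the fresh value
 *	}
 */
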
/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

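/*
 * For reference, the generic layer wraps this as atomic_add_unless(),
 * roughly (our paraphrase of include/linux/atomic.h):
 *
 *	static inline bool atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return atomic_fetch_add_unless(v, a, u) != u;
 *	}
 */
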
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

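/*
 * Classic use: taking a reference on an object looked up under RCU, where
 * a zero refcount means teardown has begun. Sketch only; obj and the
 * lookup are hypothetical:
 *
 *	rcu_read_lock();
 *	obj = lookup(id);
 *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;	// lost the race with the final put
 *	rcu_read_unlock();
 */
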
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive

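/*
 * Because the return value is old-minus-one whether or not the store went
 * ahead, callers test the sign, e.g. a semaphore-style trydown over a
 * hypothetical count:
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;	// count was already 0; nothing taken
 */
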
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */