/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
}
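
/*
 * Illustrative expansion (editorial note, not part of the original header):
 * ATOMIC_OP(add, +=, add) above generates roughly the function below.  The
 * ldrex/strex pair retries until the store-exclusive succeeds, which is the
 * looping referred to in the comment above; the c_op argument ("+=") is only
 * used by the pre-ARMv6 fallback further down.
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		prefetchw(&v->counter);
 *		__asm__ __volatile__("@ atomic_add\n"
 *	"1:	ldrex	%0, [%3]\n"
 *	"	add	%0, %0, %4\n"
 *	"	strex	%1, %0, [%3]\n"
 *	"	teq	%1, #0\n"
 *	"	bne	1b"
 *		: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *		: "r" (&v->counter), "Ir" (i)
 *		: "cc");
 *	}
 */
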
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1:	ldrex	%0, [%3]\n" \
"	" #asm_op "	%0, %0, %4\n" \
"	strex	%1, %0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result, val; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1:	ldrex	%0, [%4]\n" \
"	" #asm_op "	%1, %0, %5\n" \
"	strex	%2, %1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
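
/*
 * Usage sketch (editorial note; the names below are illustrative only):
 * callers typically re-read the counter and retry until the compare-and-swap
 * succeeds, e.g. an open-coded saturating increment:
 *
 *	int old = atomic_read(v);
 *
 *	while (old != INT_MAX) {
 *		int seen = atomic_cmpxchg_relaxed(v, old, old + 1);
 *		if (seen == old)
 *			break;
 *		old = seen;
 *	}
 */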

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
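
/*
 * Usage sketch (editorial note; obj/refs are illustrative names only):
 * atomic_fetch_add_unless() adds @a to the counter unless it currently
 * equals @u, and returns the value observed beforehand, so a
 * get-unless-zero style reference grab could look like:
 *
 *	if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;
 *
 * i.e. the counter was already zero, the add did not happen, and no new
 * reference was taken.
 */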

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

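/*
 * Pre-ARMv6 CPUs lack the ldrex/strex exclusive-access instructions, and
 * SMP is ruled out above, so the read-modify-write sequences below only
 * need protecting against the local CPU's own interrupt handlers: each op
 * simply brackets a plain C update with raw_local_irq_save()/restore().
 */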
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	val = v->counter; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	val = v->counter; \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
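/*
 * With LPAE the architecture guarantees that ldrd/strd to a naturally
 * aligned 64-bit location are single-copy atomic, so atomic64_read() and
 * atomic64_set() can use plain doubleword accesses instead of the
 * exclusive ldrexd/strexd sequences used in the non-LPAE variants below.
 */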
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(long long i, atomic64_t *v) \
{ \
	long long result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline long long \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
{ \
	long long result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1:	ldrexd	%0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd	%1, %0, %H0, [%3]\n" \
"	teq	%1, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline long long \
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
{ \
	long long result, val; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1:	ldrexd	%0, %H0, [%4]\n" \
"	" #op1 " %Q1, %Q0, %Q5\n" \
"	" #op2 " %R1, %R0, %R5\n" \
"	strexd	%2, %1, %H1, [%4]\n" \
"	teq	%2, #0\n" \
"	bne	1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_OP_RETURN(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif