/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

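/*
 * Illustrative sketch (not part of the original header): instantiating
 * ATOMIC_OP(add, +=, add) above yields, roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		prefetchw(&v->counter);
 *		__asm__ __volatile__("@ atomic_add\n"
 *	"1:	ldrex	%0, [%3]\n"	@ result = v->counter, claim monitor
 *	"	add	%0, %0, %4\n"	@ result += i
 *	"	strex	%1, %0, [%3]\n"	@ tmp = 0 on success, 1 if monitor lost
 *	"	teq	%1, #0\n"
 *	"	bne	1b"		@ lost exclusive access: retry
 *		: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *		: "r" (&v->counter), "Ir" (i)
 *		: "cc");
 *	}
 */
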
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

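/*
 * Note the split between the two flavours above: ATOMIC_OP_RETURN stores
 * and returns the *new* value (%0 is both updated and published), while
 * ATOMIC_FETCH_OP computes the update into a scratch register (%1) and
 * returns the value observed *before* the operation. Illustratively
 * (assumed behaviour, not from the original header):
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	atomic_add_return_relaxed(2, &v);	// returns 3, v is now 3
 *	atomic_fetch_add_relaxed(2, &v);	// returns 3, v is now 5
 */
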
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed

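/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * callers typically wrap cmpxchg in a read/modify/retry loop, e.g. a
 * saturating increment built on the generic atomic_cmpxchg():
 *
 *	static inline void atomic_inc_saturated(atomic_t *v)
 *	{
 *		int old = atomic_read(v);
 *		int ret;
 *
 *		for (;;) {
 *			if (old == INT_MAX)
 *				return;
 *			ret = atomic_cmpxchg(v, old, old + 1);
 *			if (ret == old)		// nobody raced with us
 *				return;
 *			old = ret;		// lost the race: retry
 *		}
 *	}
 */
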
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless

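/*
 * Usage note (assumed behaviour of the generic atomic headers, not
 * something defined here): helpers such as atomic_add_unless() and
 * atomic_inc_not_zero() are built on this primitive, roughly:
 *
 *	// obj is a hypothetical refcounted object; take a reference
 *	// only if the refcount has not already dropped to zero
 *	if (atomic_fetch_add_unless(&obj->refcnt, 1, 0) != 0)
 *		...we now hold a reference...
 */
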
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

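/*
 * For reference: each ATOMIC_OPS() line above expands to three
 * definitions, e.g. ATOMIC_OPS(add, +=, add) provides atomic_add(),
 * atomic_add_return_relaxed() and atomic_fetch_add_relaxed() on ARMv6+
 * (the pre-ARMv6 macros generate the plain, non-_relaxed names instead).
 * The fully ordered and acquire/release forms are then derived from the
 * _relaxed ones by the generic atomic machinery.
 */
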
#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

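/*
 * Why atomic64_set() needs a ldrexd/strexd loop in the non-LPAE case: a
 * plain strd is not guaranteed to be single-copy atomic there, so a
 * concurrent reader could observe one half of the new value and one half
 * of the old. Looping on an exclusive store makes the 64-bit write
 * indivisible. With LPAE, ldrd/strd to a naturally aligned doubleword is
 * architecturally atomic, hence the simpler variants above.
 */
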
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long							\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

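/*
 * Illustrative use (hypothetical semaphore-like object, not from this
 * header): atomic64_dec_if_positive() returns the new value; if the
 * counter was not positive, nothing is stored and the negative result
 * simply reflects the refused decrement.
 *
 *	if (atomic64_dec_if_positive(&sem->count) < 0)
 *		return -EBUSY;	// count was already 0; nothing was taken
 */
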
static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif