/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;						\

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"				\
	"   ; --- scond fail delay ---		\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2: 	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	asl.f	%[delay], %[delay], 1	\n"	/* delay *= 2 */	\
	"	mov.z	%[delay], 1		\n"	/* handle overflow */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- success ---			\n"				\

#define SCOND_FAIL_RETRY_VARS							\
	  ,[delay] "+&r" (delay), [tmp] "=&r" (tmp)				\
#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz	1b			\n"				\

#define SCOND_FAIL_RETRY_VARS

#endif	/* CONFIG_ARC_STAR_9000923308 */
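/*
 * For illustration only: the STAR 9000923308 retry machinery above
 * corresponds to roughly the following C logic around each LLOCK/SCOND
 * attempt. LLOCK()/SCOND() are hypothetical pseudo-helpers standing in
 * for the real instructions; the comments mirror the asm annotations:
 *
 *	unsigned int delay = 1, tmp;
 *
 *	for (;;) {
 *		val = LLOCK(&v->counter);
 *		val = op(val, i);
 *		if (SCOND(&v->counter, val) succeeded)	// "bz 4f"
 *			break;
 *		for (tmp = delay; tmp != 0; tmp--)	// spin for @delay iterations
 *			;
 *		delay *= 2;				// exponential backoff
 *		if (delay == 0)				// handle overflow
 *			delay = 1;
 *		// "b 1b": start over
 *	}
 */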
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
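/*
 * For illustration only: with the plain retry variant
 * (!CONFIG_ARC_STAR_9000923308), ATOMIC_OP(add, +=, add) expands to
 * approximately:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		: [val] "=&r" (val)
 *		: [ctr] "r" (&v->counter), [i] "ir" (i)
 *		: "cc");
 *	}
 */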
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif
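/*
 * For illustration only: the hazard the comment above describes, with a
 * hypothetical unlocked atomic_set() racing a lock-emulated RMW:
 *
 *	CPU A (emulated atomic_add)	CPU B (unlocked set)
 *	---------------------------	--------------------
 *	lock; tmp = v->counter;
 *					v->counter = i;		// lost below
 *	tmp += 1;
 *	v->counter = tmp; unlock;
 *
 * Hence atomic_set() must take the same lock as the emulated RMW ops.
 */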
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)
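/*
 * For illustration only: the instantiations above generate
 * atomic_add(), atomic_sub(), atomic_add_return(), atomic_sub_return(),
 * atomic_and(), atomic_andnot(), atomic_or() and atomic_xor().
 * A hypothetical caller:
 *
 *	atomic_t cnt = ATOMIC_INIT(0);	// ATOMIC_INIT defined below
 *
 *	atomic_add(5, &cnt);
 *	if (atomic_sub_return(5, &cnt) == 0)
 *		;	// count dropped back to zero
 */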
#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
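/*
 * For illustration only: on EZNPS, sub is derived from add with a negated
 * operand and andnot from and with an inverted mask, per the defines above:
 *
 *	atomic_sub_return(4, &cnt)	==  atomic_add_return(-4, &cnt)
 *	atomic_andnot(0x3, &flags)	==  atomic_and(~0x3, &flags)
 */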
#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
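/*
 * For illustration only: a typical use of atomic_inc_not_zero(), assuming
 * a hypothetical refcounted object "obj" with an atomic_t "refcnt" member:
 *
 *	if (atomic_inc_not_zero(&obj->refcnt)) {
 *		// got a reference; object cannot be freed under us
 *		...
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			free_obj(obj);	// hypothetical destructor
 *	}
 *
 * A count that already hit zero is never resurrected, which is exactly
 * what __atomic_add_unless(v, 1, 0) provides.
 */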
#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#endif /* _ASM_ARC_ATOMIC_H */