Revert "ARCv2: spinlock/rwlock/atomics: reduce 1 instruction in exponential backoff"
[cascardo/linux.git] arch/arc/include/asm/atomic.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)  READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF                                                \
        unsigned int delay = 1, tmp;                                            \

#define SCOND_FAIL_RETRY_ASM                                                    \
        "       bz      4f                      \n"                             \
        "   ; --- scond fail delay ---          \n"                             \
        "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
        "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
        "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
        "       asl.f   %[delay], %[delay], 1   \n"     /* delay *= 2 */        \
        "       mov.z   %[delay], 1             \n"     /* handle overflow */   \
        "       b       1b                      \n"     /* start over */        \
        "4: ; --- success ---                   \n"                             \

#define SCOND_FAIL_RETRY_VARS                                                   \
          ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \

#else   /* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM                                                    \
        "       bnz     1b                      \n"                             \

#define SCOND_FAIL_RETRY_VARS

#endif

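/*
 * For reference: the STAR 9000923308 retry path above is an exponential
 * backoff on SCOND failure.  A rough C sketch of what the ATOMIC_OP()
 * bodies below end up doing with it (illustrative only; LLOCK()/SCOND()
 * stand in for the llock/scond instructions, SCOND() "succeeding" when
 * the Z flag is set):
 *
 *	unsigned int delay = 1, tmp, val;
 *
 *	for (;;) {
 *		val = LLOCK(&v->counter);
 *		val = op(val, i);
 *		if (SCOND(val, &v->counter))
 *			break;				// store went through
 *		for (tmp = delay; tmp; tmp--)		// spin "delay" iterations
 *			;
 *		delay <<= 1;				// exponential backoff
 *		if (!delay)				// on overflow ...
 *			delay = 1;			// ... restart at 1
 *	}
 */
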
#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int val;                                               \
        SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock   %[val], [%[ctr]]                \n"             \
        "       " #asm_op " %[val], %[val], %[i]        \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
        "                                               \n"             \
        SCOND_FAIL_RETRY_ASM                                            \
                                                                        \
        : [val] "=&r"   (val)                                           \
          SCOND_FAIL_RETRY_VARS                                         \
        : [ctr] "r"     (&v->counter),                                  \
          [i]   "ir"    (i)                                             \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        return val;                                                     \
}

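/*
 * For reference, with the macros above (and the non-erratum retry macro,
 * i.e. SCOND_FAIL_RETRY_ASM being just "bnz 1b"), atomic_add_return(i, v)
 * expands to approximately:
 *
 *	smp_mb();
 *	__asm__ __volatile__(
 *	"1:	llock	%[val], [%[ctr]]	\n"
 *	"	add	%[val], %[val], %[i]	\n"
 *	"	scond	%[val], [%[ctr]]	\n"
 *	"	bnz	1b			\n"
 *	: [val] "=&r" (val)
 *	: [ctr] "r" (&v->counter), [i] "ir" (i)
 *	: "cc");
 *	smp_mb();
 *	return val;
 */
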
#else   /* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
        /*
         * Independent of hardware support, all of the atomic_xxx() APIs need
         * to follow the same locking rules to make sure that a "hardware"
         * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
         * sequence.
         *
         * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
         * requires the locking.
         */
        unsigned long flags;

        atomic_ops_lock(flags);
        WRITE_ONCE(v->counter, i);
        atomic_ops_unlock(flags);
}

#endif

/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

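/*
 * atomic_ops_lock()/atomic_ops_unlock() come from <asm/smp.h>.  Broadly
 * (sketch, not the exact definitions):
 *
 *	UP:	atomic_ops_lock(flags)   ~ local_irq_save(flags)
 *		atomic_ops_unlock(flags) ~ local_irq_restore(flags)
 *
 *	SMP:	irq-save plus a single global arch spinlock shared by all
 *		emulated atomics, so a lock/unlock pair also provides the
 *		full barrier that the *_return variants rely on.
 */
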
#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        atomic_ops_lock(flags);                                         \
        v->counter c_op i;                                              \
        atomic_ops_unlock(flags);                                       \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        unsigned long temp;                                             \
                                                                        \
        /*                                                              \
         * spin lock/unlock provides the needed smp_mb() before/after   \
         */                                                             \
        atomic_ops_lock(flags);                                         \
        temp = v->counter;                                              \
        temp c_op i;                                                    \
        v->counter = temp;                                              \
        atomic_ops_unlock(flags);                                       \
                                                                        \
        return temp;                                                    \
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS

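/*
 * Minimal usage sketch of the API generated above (same for the LLSC and
 * the lock-based flavour; do_cleanup() is just a stand-in):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_add(2, &refs);				// no ordering implied
 *	if (atomic_sub_return(3, &refs) == 0)		// returns new value,
 *		do_cleanup();				// fully ordered
 *	atomic_andnot(0x3, &refs);			// bitwise ops have no
 *							// *_return variant here
 */
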
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
        int temp;

        __asm__ __volatile__(
        "       ld.di %0, [%1]"
        : "=r"(temp)
        : "r"(&v->counter)
        : "memory");
        return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
        __asm__ __volatile__(
        "       st.di %0,[%1]"
        :
        : "r"(i), "r"(&v->counter)
        : "memory");
}

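/*
 * The .di forms of LD/ST used above appear to be the cache-bypassing
 * ("direct") accesses, matching the uncached shared memory the EZchip NPS
 * atomics operate on.
 */
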
#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        :                                                               \
        : "r"(i), "r"(&v->counter), "i"(asm_op)                         \
        : "r2", "r3", "memory");                                        \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned int temp = i;                                          \
                                                                        \
        /* Explicit full memory barrier needed before/after */          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
        "       mov r2, %0\n"                                           \
        "       mov r3, %1\n"                                           \
        "       .word %2\n"                                             \
        "       mov %0, r2"                                             \
        : "+r"(temp)                                                    \
        : "r"(&v->counter), "i"(asm_op)                                 \
        : "r2", "r3", "memory");                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        temp c_op i;                                                    \
                                                                        \
        return temp;                                                    \
}

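/*
 * The EZNPS ops are emitted as raw opcodes (.word) of custom CTOP atomic
 * instructions which implicitly use r2 (data) and r3 (address), hence the
 * explicit moves and the r2/r3 clobbers.  In the *_return flavour the
 * instruction appears to hand back the *old* value in r2, which is why the
 * new value is then recomputed in C ("temp c_op i") before being returned.
 */
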
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)                                    \
({                                                                      \
        int c, old;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
         * LLOCK/SCOND themselves don't provide any such semantics      \
         */                                                             \
        smp_mb();                                                       \
                                                                        \
        c = atomic_read(v);                                             \
        while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
                c = old;                                                \
                                                                        \
        smp_mb();                                                       \
                                                                        \
        c;                                                              \
})

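/*
 * Typical use is via the generic atomic_add_unless()/atomic_inc_not_zero()
 * wrappers built on top of this, e.g. the usual refcount pattern
 * (illustrative):
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already dying, don't take a ref
 */
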
#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)

#define atomic_inc(v)                   atomic_add(1, v)
#define atomic_dec(v)                   atomic_sub(1, v)

#define atomic_inc_and_test(v)          (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)            atomic_add_return(1, (v))
#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)       (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)       (atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)                  { (i) }

#include <asm-generic/atomic64.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_ARC_ATOMIC_H */