#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
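
/*
 * Illustrative sketch only (not part of the original interface): a
 * minimal reference count built on these primitives.  thing_refcount
 * and put_thing() are hypothetical names used purely for the example.
 *
 *	static atomic_t thing_refcount = ATOMIC_INIT(1);
 *
 *	atomic_inc(&thing_refcount);			// take a reference
 *	if (atomic_dec_and_test(&thing_refcount))	// drop a reference
 *		put_thing();				// last reference gone
 */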

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}
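
/*
 * Note on the sequence above: ldl_l/stl_c are Alpha's load-locked/
 * store-conditional pair.  stl_c leaves 0 in its source register when
 * the conditional store fails, so "beq %0,2f" jumps to the out-of-line
 * "br 1b" stub and the whole operation is retried, per the comment
 * above.
 */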

#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	smp_mb();							\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
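
/*
 * For reference: expanding ATOMIC_OPS(add) above generates atomic_add(),
 * atomic_add_return() and atomic_fetch_add(), together with the
 * corresponding atomic64_* variants, all built on the same ll/sc loop.
 */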

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#define atomic_fetch_or atomic_fetch_or

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
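
/*
 * Illustrative sketch only: the usual compare-and-swap update loop on
 * top of atomic_cmpxchg().  Here v is an atomic_t and SOME_FLAG a
 * hypothetical flag constant, both named purely for the example.
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&v);
 *		new = old | SOME_FLAG;
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */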

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
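
/*
 * Illustrative sketch only: the classic "take a reference unless the
 * object is already dying" idiom.  obj and its refs field are
 * hypothetical names used purely for the example.
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// refcount was already zero, no reference taken
 */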

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}
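
/*
 * atomic64_inc_not_zero(), defined further down, is just
 * atomic64_add_unless((v), 1, 0): take a reference only if the counter
 * has not already reached zero.
 */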

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
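
/*
 * Illustrative sketch only: because the old value minus one is returned
 * even when nothing was decremented, a caller can test the result
 * directly (count is a hypothetical atomic64_t):
 *
 *	if (atomic64_dec_if_positive(&count) < 0)
 *		;	// count was already <= 0, no decrement happened
 */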

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */