/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.  prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

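/*
 * Illustrative sketch (hypothetical objects, not part of this header):
 * two atomic_t variables that happen to share an L1 cacheline hash to the
 * same lock, while objects on different cachelines usually spread across
 * the ATOMIC_HASH_SIZE entries:
 *
 *	atomic_t a, b;				(assume &a, &b in one cacheline)
 *	ATOMIC_HASH(&a) == ATOMIC_HASH(&b)	(same arch_spinlock_t)
 *
 * The index is ((unsigned long)&a / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1).
 */
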
/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

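/*
 * Illustrative use (hypothetical caller, not part of this header): take a
 * reference only while the count is still non-zero,
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		...			(old count was non-zero, +1 applied)
 *
 * The generic atomic_add_unless()/atomic_inc_not_zero() helpers in
 * <linux/atomic.h> are thin wrappers around this routine.
 */
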
#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

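/*
 * For reference, ATOMIC_OPS(add, +=) above generates three helpers
 * (signatures only, not literal preprocessor output):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v);
 *	static __inline__ int  atomic_add_return(int i, atomic_t *v);
 *	static __inline__ int  atomic_fetch_add(int i, atomic_t *v);
 *
 * each of which takes the hashed spinlock around "v->counter += i".
 */
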
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

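/*
 * Illustrative use (hypothetical caller): the usual reference-drop idiom,
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release(obj);		(last reference is gone)
 *
 * relies on atomic_dec_return() returning the post-decrement value.
 */
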
#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the addition was done, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

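/*
 * Illustrative use (hypothetical caller): grab a reference during a lookup
 * only if the object is not already being torn down,
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;		(count was already zero)
 */
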
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */