/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout are to support the fact that
 * Linux requires memory barriers before and after the operation,
 * on any routine which updates memory and returns a value.
 */

static inline void atomic_add(int i, atomic_t *v)
{
        __insn_fetchadd4((void *)&v->counter, i);
}

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int val;

        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd4((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb(); see block comment above */
        return val;
}
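
/*
 * Illustrative sketch only (not part of the original header): because
 * atomic_add_return() is fully ordered, a caller that needs a unique,
 * globally ordered sequence number can use it directly with no extra
 * barriers.  The helper name below is hypothetical.
 */
static inline int example_next_ticket(atomic_t *ticket)
{
        /* Each caller receives a distinct, monotonically increasing value. */
        return atomic_add_return(1, ticket);
}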

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int guess, oldval = v->counter;

        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval;
}
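
/*
 * Illustrative sketch only: __atomic_add_unless() is the primitive behind
 * atomic_add_unless()/atomic_inc_not_zero() in <linux/atomic.h>.  A
 * hypothetical "take a reference unless the count is already zero" helper
 * could be built on it like this; the name is illustrative, not a kernel API.
 */
static inline int example_get_unless_zero(atomic_t *refcnt)
{
        /* The old value is returned; the add happened iff it was not zero. */
        return __atomic_add_unless(refcnt, 1, 0) != 0;
}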

static inline void atomic_and(int i, atomic_t *v)
{
        __insn_fetchand4((void *)&v->counter, i);
}

static inline void atomic_or(int i, atomic_t *v)
{
        __insn_fetchor4((void *)&v->counter, i);
}

static inline void atomic_xor(int i, atomic_t *v)
{
        int guess, oldval = v->counter;

        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
}
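
/*
 * Illustrative sketch only: the mtspr/cmpexch loop above is the general
 * pattern for any read-modify-write that lacks a dedicated fetch-style
 * instruction.  For example, a hypothetical atomic nand (v = ~(v & i))
 * could reuse it unchanged apart from the computed new value.
 */
static inline void example_atomic_nand(int i, atomic_t *v)
{
        int guess, oldval = v->counter;

        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, ~(guess & i));
        } while (guess != oldval);
}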

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v) READ_ONCE((v)->counter)
#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))

static inline void atomic64_add(long i, atomic64_t *v)
{
        __insn_fetchadd((void *)&v->counter, i);
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long val;

        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb(); see atomic_add_return() */
        return val;
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long guess, oldval = v->counter;

        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}

static inline void atomic64_and(long i, atomic64_t *v)
{
        __insn_fetchand((void *)&v->counter, i);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
        __insn_fetchor((void *)&v->counter, i);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
        long guess, oldval = v->counter;

        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
}

#define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v))
#define atomic64_sub(i, v) atomic64_add(-(i), (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
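
/*
 * Illustrative sketch only: a typical put-side refcount pattern built on
 * atomic64_dec_and_test(), releasing the object when the last reference
 * is dropped.  The helper and its release callback are hypothetical.
 */
static inline void example_put_ref(atomic64_t *refcnt, void (*release)(void))
{
        /* A true result means this caller dropped the final reference. */
        if (atomic64_dec_and_test(refcnt))
                release();
}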

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */