/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>
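
/*
 * Background for the asm below: ARM64_LSE_ATOMIC_INSN(), provided by
 * <asm/lse.h>, carries two alternative instruction sequences - an LL/SC
 * (load/store-exclusive) version and an LSE atomics version - and the
 * boot-time alternatives framework patches in the LSE sequence on CPUs
 * that support it. The ticket layout is assumed to be the one defined in
 * <asm/spinlock_types.h>: a 32-bit lock word split into 16-bit "owner"
 * and "next" halves, with TICKET_SHIFT == 16.
 */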

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb();

	asm volatile(
"	sevl\n"
"1:	wfe\n"
"2:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1b\n"
	/* Serialise against any concurrent lockers */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	stxr	%w1, %w0, %2\n"
"	nop\n"
"	nop\n",
	/* LSE atomics */
"	mov	%w1, %w0\n"
"	cas	%w0, %w0, %2\n"
"	eor	%w1, %w1, %w0\n")
"	cbnz	%w1, 2b\n"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	:
	: "memory");
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb(); /* See arch_spin_unlock_wait */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
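
/*
 * Example: with owner == 1 and next == 3, one CPU holds ticket 1 (the lock)
 * and another is spinning with ticket 2, so next - owner == 2 > 1 and
 * arch_spin_is_contended() reports the lock as contended.
 */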

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"	nop",
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	"	nop\n"
	"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
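
/*
 * State encoding of rw->lock assumed by the write- and read-lock code in
 * this file (see <asm/spinlock_types.h>):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set, no readers)
 *	0x00000001..	read-locked, value is the number of readers
 *
 * arch_write_trylock() therefore only succeeds when the word is 0, and
 * arch_read_trylock() fails once an increment would set bit 31.
 */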

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, the lock is already held.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
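
/*
 * Illustrative C-level model (not used by the kernel) of the reader fast
 * path described above; the real code below performs the update with
 * exclusives or an LSE CAS and waits with WFE instead of busy-looping:
 *
 *	s32 val;
 *	do {
 *		val = READ_ONCE(rw->lock);
 *	} while (val < 0);		// writer holds the lock (bit 31 set)
 *	rw->lock = val + 1;		// atomically, in the real code
 */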

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	nop\n"
	"	cbnz	%w1, 2b",
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	nop\n"
	"	nop\n"
	"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	"	nop\n"
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */