#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
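
/*
 * Background note (added commentary): PA-RISC's one atomic
 * read-modify-write primitive is LDCW (load and clear word).  __ldcw()
 * atomically reads the lock word and writes zero to it, so a lock word
 * of 1 means "free" and 0 means "held".  The word must sit in a
 * 16-byte aligned slot, which __ldcw_align() selects from within the
 * arch_spinlock_t (see <asm/ldcw.h>).
 */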

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	/* Spin until the lock word is non-zero, i.e. the lock is free. */
	smp_cond_load_acquire(a, VAL);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				/* Interrupts were on at entry: re-enable
				 * them while we spin so they aren't held
				 * off for the whole wait. */
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	mb();
	*a = 1;	/* 1 == unlocked */
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* LDCW returns the old value: non-zero means we took the lock. */
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
 * time by readers.  With care, they can also be taken in interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
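
/*
 * Illustrative sketch only (added commentary, not part of the API): the
 * counter states maintained by the functions below are
 *
 *	rw->counter == 0	unlocked
 *	rw->counter >  0	held by that many readers
 *	rw->counter == -1	held by one writer
 *
 * so a reader acquisition is, conceptually,
 *
 *	arch_spin_lock(&rw->lock);	// serialise access to the counter
 *	rw->counter++;			// one more reader holds the rwlock
 *	arch_spin_unlock(&rw->lock);	// writers now see counter > 0
 */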

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter--;
	arch_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;

 retry:
	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		rw->counter++;
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);
		return 1;
	}

	local_irq_restore(flags);
	/* If write-locked, we fail to acquire the lock */
	if (rw->counter < 0)
		return 0;

	/* Wait until we have a realistic chance at the lock */
	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
		cpu_relax();

	goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long flags;

 retry:
	local_irq_save(flags);
	arch_spin_lock_flags(&rw->lock, flags);

	if (rw->counter != 0) {
		/* Readers still hold the rwlock: drop the spinlock and
		 * spin outside it so they can get in to release. */
		arch_spin_unlock(&rw->lock);
		local_irq_restore(flags);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	rw->counter = -1; /* mark as write-locked */
	mb();
	local_irq_restore(flags);
}

static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->counter = 0;
	arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long flags;
	int result = 0;

	local_irq_save(flags);
	if (arch_spin_trylock(&rw->lock)) {
		if (rw->counter == 0) {
			rw->counter = -1;
			result = 1;
		} else {
			/* Read-locked.  Oh well. */
			arch_spin_unlock(&rw->lock);
		}
	}
	local_irq_restore(flags);

	return result;
}

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
	return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SPINLOCK_H */