#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
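/*
 * Illustrative sketch only: the locking sequence implemented in assembly
 * in __raw_spin_lock() below is roughly equivalent to the following
 * pseudo-C, where load_exclusive()/store_exclusive() are hypothetical
 * stand-ins for the ldrex/strex instruction pair, not real kernel helpers:
 *
 *	do {
 *		old = load_exclusive(&lock->lock);	// ldrex
 *	} while (old != 0 ||				// already locked
 *		 !store_exclusive(&lock->lock, 1));	// strexeq failed
 *	smp_mb();					// barrier after acquire
 */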
#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}
/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
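/*
 * Illustrative sketch only: taking a write lock amounts to atomically
 * performing the following with ldrex/strex (see __raw_write_lock below):
 *
 *	if (rw->lock == 0)		// no readers and no writer
 *		rw->lock = 0x80000000;	// set bit 31: writer owns the lock
 *	else
 *		retry;
 */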
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
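/*
 * Illustrative sketch only: the steps listed above correspond roughly to
 * this pseudo-C, with load_exclusive()/store_exclusive() again standing
 * in for the ldrex/strex pair (not real kernel helpers):
 *
 *	do {
 *		count = load_exclusive(&rw->lock) + 1;	// ldrex + adds
 *		failed = (count < 0) ||			// writer holds bit 31
 *			 !store_exclusive(&rw->lock, count); // strexpl
 *	} while (failed);				// rsbpls + bmi
 *	smp_mb();
 */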
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq	p15, 0, %0, c7, c10, 4\n" /* DSB */
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */