#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#define __raw_spin_is_locked(x) \
		(*(volatile signed int *)(&(x)->slock) <= 0)
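/*
 * Editor's note (not in the original source): the lock word encoding
 * assumed throughout this file is
 *
 *	slock == 1	unlocked
 *	slock <= 0	locked (negative values are left behind by failed
 *			acquisition attempts, since every contender's
 *			"decl" lands on the word)
 *
 * which is why the "<= 0" test above reads as "is locked".
 */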
#define __raw_spin_lock_string \
	"\n1:\t" \
	LOCK_PREFIX " ; decl %0\n\t" \
	"js 2f\n" \
	LOCK_SECTION_START("") \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpl $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	LOCK_SECTION_END
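/*
 * Editor's sketch (not part of the original): in spirit, the asm string
 * above is the loop below. The contended path is placed in a
 * LOCK_SECTION so the uncontended fast path stays short and cache-hot.
 */
#if 0	/* illustrative only -- a real lock needs the atomic "lock ; decl" */
	signed int *s = (signed int *)&lock->slock;	/* asm treats it signed */
	for (;;) {
		if (--(*s) >= 0)	/* 1: lock ; decl %0 ; js 2f */
			break;		/* went 1 -> 0: acquired */
		while (*s <= 0)		/* 2: rep;nop / cmpl $0,%0 / jle 2b */
			cpu_relax();	/* waiting for unlock's "movl $1" */
		/* jmp 1b: word went positive again, retry the decrement */
	}
#endif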
#define __raw_spin_lock_string_up \
	"\n\tdecl %0"
#define __raw_spin_unlock_string \
	"movl $1,%0" \
		:"=m" (lock->slock) : : "memory"
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	asm volatile(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(
		__raw_spin_unlock_string
	);
}
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
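/*
 * Editor's sketch (not in the original) of how these __raw_* primitives
 * are reached in practice, assuming the generic <linux/spinlock.h>
 * wrappers of this kernel generation; the names below come from that
 * layer, not from this file:
 */
#if 0	/* illustrative only */
	DEFINE_SPINLOCK(lock);

	spin_lock(&lock);		/* -> __raw_spin_lock(&lock.raw_lock) */
	/* ... critical section ... */
	spin_unlock(&lock);		/* -> __raw_spin_unlock(&lock.raw_lock) */

	if (spin_trylock(&lock)) {	/* -> __raw_spin_trylock() */
		/* ... */
		spin_unlock(&lock);
	}
#endif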
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
#define __raw_read_can_lock(x)		((int)(x)->lock > 0)
#define __raw_write_can_lock(x)		((x)->lock == RW_LOCK_BIAS)
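/*
 * Editor's note on the counter scheme (RW_LOCK_BIAS is 0x01000000 in
 * this generation's <asm/rwlock.h>; the value is quoted here only for
 * illustration):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - r	held by r readers
 *	lock == 0			held by one writer
 *
 * Each reader subtracts 1 and a writer subtracts the whole bias, so a
 * writer drives the count non-positive: __raw_read_can_lock()'s "> 0"
 * test then fails for new readers, and __raw_write_can_lock() succeeds
 * only when neither readers nor a writer are present. Transient
 * negative values occur while losing contenders back out.
 */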
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__build_read_lock(rw);
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	__build_write_lock(rw);
}
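/*
 * Editor's note: __build_read_lock()/__build_write_lock() are asm
 * macros from <asm/rwlock.h>. Roughly, they emit a LOCK'd subtract
 * (1 for a reader, RW_LOCK_BIAS for a writer) plus a conditional jump
 * to an out-of-line slow path that backs the subtraction out and
 * spins; the details live in rwlock.h, not in this file.
 */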
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
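/*
 * Editor's worked example (again assuming RW_LOCK_BIAS == 0x01000000):
 *
 *	unlocked:  0x01000000 - RW_LOCK_BIAS == 0, atomic_sub_and_test()
 *	           is true -> we own the write lock (count is now 0).
 *	2 readers: 0x00fffffe - RW_LOCK_BIAS == -2, the test is false ->
 *	           add the bias back and return 0; the transient negative
 *	           value only holds off would-be readers for a moment.
 */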
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}
#endif /* __ASM_SPINLOCK_H */