/*
 * CRIS architecture spinlock / rwlock primitives.  The atomic lock
 * sequences themselves live in out-of-line assembly helpers
 * (cris_spin_lock and friends, declared below).
 */
#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H
#include <asm/system.h>
/* Bias constant for the reader/writer lock scheme. */
#define RW_LOCK_BIAS 0x01000000
/* Lock word convention: 1 = unlocked; <= 0 = held (see spin_is_locked). */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
/* A lock is held once its byte value has been driven to zero or below. */
#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
/* Busy-wait (with a compiler barrier each pass) until the lock is free. */
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
/* Out-of-line assembly implementations of the atomic lock operations. */
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void* l);
/*
 * Release a spinlock: a single move.d store puts the unlocked value back
 * into the lock word (a plain aligned store is sufficient for release on
 * this arch).  NOTE(review): the asm operand constraints continue on
 * lines not shown in this excerpt — confirm %0 is lock->lock and %1 the
 * unlocked value.
 */
static inline void _raw_spin_unlock(spinlock_t *lock)
__asm__ volatile ("move.d %1,%0" \
/*
 * Attempt to take the lock exactly once without spinning.  Returns the
 * assembly helper's result directly; presumably nonzero on success —
 * confirm against the cris_spin_trylock implementation.
 */
static inline int _raw_spin_trylock(spinlock_t *lock)
return cris_spin_trylock((void*)&lock->lock);
/* Spin until the lock is acquired; all the work is done in assembly. */
static inline void _raw_spin_lock(spinlock_t *lock)
cris_spin_lock((void*)&lock->lock);
/*
 * Flags-aware lock variant used by spin_lock_irqsave().  The flags
 * argument is presumably ignored on this architecture and the body (not
 * visible here) just takes the lock — TODO confirm.
 */
static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
41 * Read-write spinlocks, allowing multiple readers
42 * but only one writer.
44 * NOTE! it is quite common to have readers in interrupts
45 * but no interrupt writers. For those circumstances we
46 * can "mix" irq-safe locks - any writer needs to get a
47 * irq-safe write-lock, but readers can get non-irqsafe
/* NOTE(review): presumably the CONFIG_PREEMPT lock-break flag, set while
 * another CPU is waiting so the holder can voluntarily release — confirm. */
unsigned int break_lock;
/* Initializer: inner spinlock unlocked ({1}, matching SPIN_LOCK_UNLOCKED),
 * reader counter 0 (no readers, no writer). */
#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Readers may enter whenever no writer holds the lock; a writer marks
 * the lock by setting counter to -1 (see _raw_write_lock below).
 */
#define read_can_lock(x) ((int)(x)->counter >= 0)
/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer may enter only when there are no readers and no writer
 * (counter exactly 0).
 */
#define write_can_lock(x) ((x)->counter == 0)
/* No arch-specific read_trylock; fall back to the generic helper. */
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
/* read_lock and read_unlock are pretty straightforward.  It is unfortunate
 * that we end up saving/restoring the interrupt flags twice for
 * read_lock_irqsave and so on. */
/*
 * Acquire a read lock.  Interrupts are disabled for the critical section
 * and the inner spinlock serializes access to the reader counter; the
 * counter increment itself presumably sits between the lock/unlock pair
 * on lines not shown in this excerpt — TODO confirm.
 */
static __inline__ void _raw_read_lock(rwlock_t *rw)
local_irq_save(flags);
_raw_spin_lock(&rw->lock);
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
/*
 * Drop a read lock: the mirror image of _raw_read_lock.  The counter
 * decrement presumably happens while the inner spinlock is held, on
 * lines not shown in this excerpt — TODO confirm.
 */
static __inline__ void _raw_read_unlock(rwlock_t *rw)
local_irq_save(flags);
_raw_spin_lock(&rw->lock);
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
103 /* write_lock is less trivial. We optimistically grab the lock and check
104 * if we surprised any readers. If so we release the lock and wait till
105 * they're all gone before trying again
 * Also note that we don't use the _irqsave / _irqrestore variants here:
 * if we're called with interrupts enabled while readers (or other
 * writers) can run in interrupt handlers, the caller has made a locking
 * error and we'd dead-lock sooner or later anyway. prumpf */
/*
 * Acquire the write lock.  Optimistically grab the inner spinlock; if any
 * readers are present (counter != 0) release it, busy-wait for the readers
 * to drain, and retry (the retry jump is on a line not shown in this
 * excerpt — TODO confirm).  On success the inner spinlock is deliberately
 * kept held until write_unlock, and counter = -1 marks "write-locked".
 */
static __inline__ void _raw_write_lock(rwlock_t *rw)
_raw_spin_lock(&rw->lock);
if(rw->counter != 0) {
/* this basically never happens */
_raw_spin_unlock(&rw->lock);
/* busy-wait for all readers to go away before retrying */
while(rw->counter != 0);
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
/* write_unlock is absolutely trivial - we don't have to wait for anything */
/* NOTE(review): the writer marker (counter = -1) presumably gets reset to 0
 * on a line not shown in this excerpt, before the spinlock is released —
 * TODO confirm. */
static __inline__ void _raw_write_unlock(rwlock_t *rw)
_raw_spin_unlock(&rw->lock);
/*
 * One-shot write lock attempt: same shape as _raw_write_lock but instead
 * of spinning on readers it backs off.  The failure (return 0) and success
 * (return 1) statements are presumably on lines not shown in this
 * excerpt — TODO confirm the return convention.
 */
static __inline__ int _raw_write_trylock(rwlock_t *rw)
_raw_spin_lock(&rw->lock);
if (rw->counter != 0) {
/* this basically never happens */
_raw_spin_unlock(&rw->lock);
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
/* Nonzero when at least one reader holds the lock (counter > 0). */
static __inline__ int is_read_locked(rwlock_t *rw)
return rw->counter > 0;
/* Nonzero when a writer holds the lock (counter was set to -1). */
static __inline__ int is_write_locked(rwlock_t *rw)
return rw->counter < 0;
/* No arch-specific backoff: just cpu_relax() between spin iterations. */
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */