/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
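
/* Illustration (added; not part of the original comment): a pre-V9
 * Bicc branch encodes a 22-bit word displacement (roughly +/-8MB of
 * reach), while a V9 BPcc branch encodes only 19 bits (roughly
 * +/-1MB).  If the linker places the spinner code far from the fast
 * path, only the older encoding is guaranteed to reach it:
 *
 *	ba,a		1b		! pre-V9 Bicc, disp22, +/-8MB
 *	ba,a,pt		%xcc, 1b	! V9 BPcc, disp19, +/-1MB
 */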
#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while((lp)->lock)
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
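
/* Note (added): this is the classic "test-and-test-and-set" idiom.
 * The hot path does one atomic ldstub (load the lock byte, store
 * 0xff); a zero result means we took the lock.  On contention we
 * jump to the out-of-line spinner, which polls with a plain ldub
 * load, keeping the cache line in shared state until the lock is
 * observed free, and only then retries the atomic ldstub.
 */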
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
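
/* Note (added): the barriers pair up as acquire/release.  After the
 * ldstub in _raw_spin_lock, #StoreLoad | #StoreStore keeps critical
 * section memory operations from being hoisted above the lock
 * acquisition; before the stb %g0 here, #StoreStore | #LoadStore
 * keeps them from sinking below the lock release.
 */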
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}
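
/* Note (added): _raw_spin_lock_flags implements the "re-enable
 * interrupts while spinning" trick.  On contention it saves the
 * current %pil, writes the caller's pre-irqsave interrupt level from
 * 'flags' into %pil so pending interrupts can be serviced while we
 * poll, then restores the saved %pil (in the branch delay slot)
 * before retrying the atomic ldstub.
 */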
#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
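
/* Note (added): the rwlock word encodes both reader and writer state.
 * Readers atomically increment the low bits with cas; a writer cas's
 * in the 0x80000000 bit, which only succeeds when the word is zero.
 * The read_can_lock()/write_can_lock() tests at the bottom of this
 * file rely on exactly this encoding.
 */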
static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
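
/* Note (added): the cas loop above is the standard lock-free
 * read-modify-write.  We load the counter, compute old+1, and cas it
 * in; cas stores the new value only if the memory word still equals
 * the old value, and returns what it found there.  If another CPU
 * raced us, the cmp fails and we simply retry.  A negative (signed)
 * value means a writer holds the lock, so we poll out of line.
 */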
static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}
static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}
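
/* Note (added): __write_trylock returns 1 if the 0x80000000 writer
 * bit was cas'd in (i.e. the lock word was zero), and 0 if the lock
 * was busy.  Unlike __write_lock there is no out-of-line spinner; a
 * busy lock falls straight through with result still 0.
 */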
#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)
#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);
#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})
#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */