/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

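/* Illustrative sketch (not from the original file) of the layout the
 * comment above describes: the contended spin loop is emitted out of
 * line via .subsection, so the uncontended path stays straight-line:
 *
 *	1: ldstub  [lock], %o0		! fast path: try to grab the lock
 *	   brnz,pn %o0, 2f		! contended? jump to the spinner
 *	    nop
 *	   .subsection 2
 *	2: ldub    [lock], %o0		! spin with plain loads, no stores
 *	   brnz,pt %o0, 2b
 *	    nop
 *	   ba,a,pt %%xcc, 1b		! branch back, retry the ldstub
 *	   .previous
 */
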
#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

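/* The lock fast path below hinges on ldstub, which atomically loads
 * the addressed byte and stores all-ones (0xff) back: a zero result
 * means the lock was observed free and is now held by us.
 */
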
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

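/* A hedged usage sketch (not part of this header): a caller that must
 * not block can poll, e.g. with a hypothetical 'my_lock':
 *
 *	while (!__raw_spin_trylock(&my_lock))
 *		cpu_relax();
 */
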
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

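/* Lock word layout, as implied by the mask and can_lock tests below:
 * the low bits hold the count of active readers, and bit 0x80000000
 * is set while a writer owns the lock.
 */
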
static inline void __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline int __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

static inline void __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

static inline void __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

static inline void __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

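/* read_can_lock fails only when the writer bit is set; write_can_lock
 * demands the whole word be zero, i.e. no readers and no writer.
 */
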
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */