/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>      /* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */
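
/* For example (a sketch of the usual build flow; the exact menuconfig
 * location of the option is from memory):
 *
 *      $ make menuconfig       # Kernel hacking -> spinlock debugging
 *      $ grep DEBUG_SPINLOCK .config
 *      CONFIG_DEBUG_SPINLOCK=y
 *      $ make
 */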

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which is what the kernel
 * currently runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
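
/* For reference, a rough displacement calculation (figures taken from
 * the SPARC V9 instruction formats; worth re-checking against the
 * manual): the pre-V9 Bicc branches carry a 22-bit word displacement,
 * i.e. +/- 2^21 * 4 bytes = +/-8MB, while the V9 BPcc form has only
 * 19 bits (+/-1MB) and the register branches (brz/brnz/...) only
 * 16 bits (+/-128KB).  That is why the rule above restricts branches
 * into and out of the spinner sections to the older forms.
 */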

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
        volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED      (spinlock_t) {0,}

#define spin_lock_init(lp)      do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)      ((lp)->lock != 0)

#define spin_unlock_wait(lp)    \
do {    membar("#LoadLoad");    \
} while((lp)->lock)

static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        /* Atomically set the lock byte with ldstub; if it was already
         * non-zero (held), spin with plain loads in the out-of-line
         * subsection until it reads zero, then retry the atomic grab.
         */
        __asm__ __volatile__(
"1:     ldstub          [%1], %0\n"
"       membar          #StoreLoad | #StoreStore\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldub            [%1], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 1b\n"
"       .previous"
        : "=&r" (tmp)
        : "r" (lock)
        : "memory");
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long result;

        /* A single ldstub attempt, no spinning; the old value is zero
         * iff we just acquired the lock.
         */
        __asm__ __volatile__(
"       ldstub          [%1], %0\n"
"       membar          #StoreLoad | #StoreStore"
        : "=r" (result)
        : "r" (lock)
        : "memory");

        return (result == 0UL);
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        /* Order all prior loads and stores before the releasing store. */
        __asm__ __volatile__(
"       membar          #StoreStore | #LoadStore\n"
"       stb             %%g0, [%0]"
        : /* No outputs */
        : "r" (lock)
        : "memory");
}

static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        unsigned long tmp1, tmp2;

        /* Like _raw_spin_lock(), but while spinning we drop %pil back
         * to the caller's saved value (passed in via 'flags') so pending
         * interrupts can be serviced, then restore the interrupts-disabled
         * %pil before retrying the ldstub.
         */
        __asm__ __volatile__(
"1:     ldstub          [%2], %0\n"
"       membar          #StoreLoad | #StoreStore\n"
"       brnz,pn         %0, 2f\n"
"        nop\n"
"       .subsection     2\n"
"2:     rdpr            %%pil, %1\n"
"       wrpr            %3, %%pil\n"
"3:     ldub            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 3b\n"
"        nop\n"
"       ba,pt           %%xcc, 1b\n"
"        wrpr           %1, %%pil\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r"(lock), "r"(flags)
        : "memory");
}
117
#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
        volatile unsigned char lock;
        unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)      do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)  ((__lock)->lock != 0)
#define spin_unlock_wait(__lock)        \
do { \
        membar("#LoadLoad"); \
} while((__lock)->lock)

extern void _do_spin_lock(spinlock_t *lock, char *str);
extern void _do_spin_unlock(spinlock_t *lock);
extern int _do_spin_trylock(spinlock_t *lock);

#define _raw_spin_trylock(lp)   _do_spin_trylock(lp)
#define _raw_spin_lock(lock)    _do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)  _do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */
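
/* A minimal usage sketch (assumption: the generic <linux/spinlock.h>
 * wrappers such as spin_lock()/spin_unlock() ultimately call the _raw_
 * primitives above; my_lock and my_counter are hypothetical names):
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *      static int my_counter;
 *
 *      static void bump_counter(void)
 *      {
 *              spin_lock(&my_lock);
 *              my_counter++;
 *              spin_unlock(&my_lock);
 *      }
 *
 * Code that can race with interrupt handlers would use
 * spin_lock_irqsave() instead, which is what hands the saved 'flags'
 * value down to _raw_spin_lock_flags().
 */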

/* Multi-reader locks; these are much saner than the 32-bit Sparc ones... */
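
/* A usage sketch (assumption: the generic read_lock()/write_lock()
 * wrappers dispatch to the _raw_ routines defined below; my_rwlock and
 * my_list are hypothetical names):
 *
 *      static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *      read_lock(&my_rwlock);          any number of readers may enter
 *      ... walk my_list ...
 *      read_unlock(&my_rwlock);
 *
 *      write_lock(&my_rwlock);         excludes readers and writers
 *      ... modify my_list ...
 *      write_unlock(&my_rwlock);
 */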

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED        (rwlock_t) {0,}
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

static inline void __read_lock(rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        /* cas-increment the reader count; a negative value means a
         * writer holds (or is taking) the lock, so spin out of line
         * until it goes non-negative.
         */
        __asm__ __volatile__ (
"1:     ldsw            [%2], %0\n"
"       brlz,pn         %0, 2f\n"
"4:      add            %0, 1, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     ldsw            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brlz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}

static inline void __read_unlock(rwlock_t *lock)
{
        unsigned long tmp1, tmp2;

        /* Order the critical section's loads and stores before re-reading
         * the lock word, then cas-decrement the reader count.
         */
        __asm__ __volatile__(
"       membar  #StoreLoad | #LoadLoad\n"
"1:     lduw    [%2], %0\n"
"       sub     %0, 1, %1\n"
"       cas     [%2], %0, %1\n"
"       cmp     %0, %1\n"
"       bne,pn  %%xcc, 1b\n"
"        nop"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock)
        : "memory");
}

static inline void __write_lock(rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2;

        mask = 0x80000000UL;

        /* cas the writer bit into a lock word that must be zero (no
         * readers, no writer); spin out of line while any bit is set.
         */
        __asm__ __volatile__(
"1:     lduw            [%2], %0\n"
"       brnz,pn         %0, 2f\n"
"4:      or             %0, %3, %1\n"
"       cas             [%2], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       .subsection     2\n"
"2:     lduw            [%2], %0\n"
"       membar          #LoadLoad\n"
"       brnz,pt         %0, 2b\n"
"        nop\n"
"       ba,a,pt         %%xcc, 4b\n"
"       .previous"
        : "=&r" (tmp1), "=&r" (tmp2)
        : "r" (lock), "r" (mask)
        : "memory");
}

static inline void __write_unlock(rwlock_t *lock)
{
        /* Order the critical section before clearing the writer bit. */
        __asm__ __volatile__(
"       membar          #LoadStore | #StoreStore\n"
"       stw             %%g0, [%0]"
        : /* no outputs */
        : "r" (lock)
        : "memory");
}

static inline int __write_trylock(rwlock_t *lock)
{
        unsigned long mask, tmp1, tmp2, result;

        mask = 0x80000000UL;

        /* Try to cas the writer bit in; bail out immediately (returning 0)
         * if the lock word is seen non-zero, retrying only when the cas
         * itself raced.
         */
        __asm__ __volatile__(
"       mov             0, %2\n"
"1:     lduw            [%3], %0\n"
"       brnz,pn         %0, 2f\n"
"        or             %0, %4, %1\n"
"       cas             [%3], %0, %1\n"
"       cmp             %0, %1\n"
"       membar          #StoreLoad | #StoreStore\n"
"       bne,pn          %%icc, 1b\n"
"        nop\n"
"       mov             1, %2\n"
"2:"
        : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
        : "r" (lock), "r" (mask)
        : "memory");

        return result;
}

#define _raw_read_lock(p)       __read_lock(p)
#define _raw_read_unlock(p)     __read_unlock(p)
#define _raw_write_lock(p)      __write_lock(p)
#define _raw_write_unlock(p)    __write_unlock(p)
#define _raw_write_trylock(p)   __write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
        volatile unsigned long lock;
        unsigned int writer_pc, writer_cpu;
        unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED        (rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);

#define _raw_read_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_lock(lock, "read_lock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_read_unlock(lock, "read_unlock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_lock(lock, "write_lock"); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {    unsigned long flags; \
        local_irq_save(flags); \
        _do_write_unlock(lock); \
        local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({      unsigned long flags; \
        int val; \
        local_irq_save(flags); \
        val = _do_write_trylock(lock, "write_trylock"); \
        local_irq_restore(flags); \
        val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
#define read_can_lock(rw)       (!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)      (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */