/* include/asm-parisc/spinlock.h */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero. Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
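
/* Both helpers used below come from <asm/system.h>.  Conceptually (this is
 * a sketch, not their actual definitions):
 *
 *   __ldcw_align(x) returns a pointer to the suitably (16-byte) aligned
 *   word inside the spinlock's four-word lock area.
 *
 *   __ldcw(a) wraps the "load and clear word" instruction: it atomically
 *   reads *a and stores 0 to it, returning the value that was read, so a
 *   return value of 1 means the lock was just acquired and 0 means it was
 *   already held.
 */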

#ifndef CONFIG_DEBUG_SPINLOCK

#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static inline int spin_is_locked(spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

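/* Acquire: try to take the lock with ldcw; if that fails, spin on plain
 * loads of the lock word (cheaper than retrying ldcw, which always writes
 * the word) and attempt ldcw again only once the word reads non-zero.
 * The mb() calls keep the critical section from being reordered across
 * the lock and unlock operations.
 */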
static inline void _raw_spin_lock(spinlock_t *x)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
                while (*a == 0);
        mb();
}

static inline void _raw_spin_unlock(spinlock_t *x)
{
        volatile unsigned int *a;
        mb();
        a = __ldcw_align(x);
        *a = 1;
        mb();
}

static inline int _raw_spin_trylock(spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        mb();
        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;
        mb();

        return ret;
}
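
/* Illustrative use of the trylock variant (the lock name is hypothetical):
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      if (_raw_spin_trylock(&my_lock)) {
 *              ... critical section ...
 *              _raw_spin_unlock(&my_lock);
 *      } else {
 *              ... lock was busy; back off or do something else ...
 *      }
 */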

#define spin_lock_own(LOCK, LOCATION)   ((void)0)

#else /* CONFIG_DEBUG_SPINLOCK */

#define SPINLOCK_MAGIC  0x1D244B3C

#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__, NULL, 0, -1, NULL, NULL }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

#define CHECK_LOCK(x)                                                   \
        do {                                                            \
                if (unlikely((x)->magic != SPINLOCK_MAGIC)) {           \
                        printk(KERN_ERR "%s:%d: spin_is_locked"         \
                                " on uninitialized spinlock %p.\n",     \
                                __FILE__, __LINE__, (x));               \
                }                                                       \
        } while(0)

#define spin_is_locked(x)                                               \
        ({                                                              \
                CHECK_LOCK(x);                                          \
                volatile unsigned int *a = __ldcw_align(x);             \
                if (unlikely((*a == 0) && (x)->babble)) {               \
                        (x)->babble--;                                  \
                        printk(KERN_WARNING                             \
                                "%s:%d: spin_is_locked(%s/%p) already"  \
                                " locked by %s:%d in %s at %p(%d)\n",   \
                                __FILE__, __LINE__, (x)->module, (x),   \
                                (x)->bfile, (x)->bline, (x)->task->comm,\
                                (x)->previous, (x)->oncpu);             \
                }                                                       \
                *a == 0;                                                \
        })

#define spin_unlock_wait(x)                                             \
        do {                                                            \
                CHECK_LOCK(x);                                          \
                volatile unsigned int *a = __ldcw_align(x);             \
                if (unlikely((*a == 0) && (x)->babble)) {               \
                        (x)->babble--;                                  \
                        printk(KERN_WARNING                             \
                                "%s:%d: spin_unlock_wait(%s/%p)"        \
                                " owned by %s:%d in %s at %p(%d)\n",    \
                                __FILE__, __LINE__, (x)->module, (x),   \
                                (x)->bfile, (x)->bline, (x)->task->comm,\
                                (x)->previous, (x)->oncpu);             \
                }                                                       \
                barrier();                                              \
        } while (*__ldcw_align(x) == 0)

extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
extern int _dbg_spin_trylock(spinlock_t *lock, const char *, int);

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#define _raw_spin_unlock(lock)  _dbg_spin_unlock(lock, __FILE__, __LINE__)
#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)

/* Debug helper, kept just in case it is needed: warn unless the lock is
 * held and owned by the current CPU. */
#define spin_lock_own(LOCK, LOCATION)                                   \
do {                                                                    \
        volatile unsigned int *a = __ldcw_align(LOCK);                  \
        if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
                printk(KERN_WARNING                                     \
                        "%s: called on %d from %p but lock %s on %d\n", \
                        LOCATION, smp_processor_id(),                   \
                        __builtin_return_address(0),                    \
                        (*a == 0) ? "taken" : "freed", (LOCK)->oncpu);  \
} while (0)

#endif /* !(CONFIG_DEBUG_SPINLOCK) */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
typedef struct {
        spinlock_t lock;
        volatile int counter;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;
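
/* rw->counter encodes the lock state:
 *      > 0     that many readers hold the lock
 *        0     unlocked
 *       -1     held by a writer
 */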

#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }

#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

/* read_lock and read_unlock are fairly straightforward.  It is somewhat
 * unfortunate that we end up saving/restoring flags twice for
 * read_lock_irqsave and friends. */
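/* The flags save/restore below keeps an interrupt handler on this CPU that
 * also takes this rwlock from spinning forever on rw->lock while we hold
 * it for the counter update.
 */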

#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_read_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter++;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}
#endif  /* CONFIG_DEBUG_RWLOCK */

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter--;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait until
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and there are readers (or other
 * writers) in interrupt handlers, that is a caller bug and we would
 * deadlock sooner or later anyway.   prumpf */

#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
        _raw_spin_lock(&rw->lock);

        if (rw->counter != 0) {
                /* this basically never happens */
                _raw_spin_unlock(&rw->lock);

                while (rw->counter != 0);

                goto retry;
        }

        /* got it.  now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
}
#endif /* CONFIG_DEBUG_RWLOCK */

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        rw->counter = 0;
        _raw_spin_unlock(&rw->lock);
}

#ifdef CONFIG_DEBUG_RWLOCK
extern int _dbg_write_trylock(rwlock_t *rw, const char *bfile, int bline);
#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
#else
static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
        _raw_spin_lock(&rw->lock);
        if (rw->counter != 0) {
                /* this basically never happens */
                _raw_spin_unlock(&rw->lock);

                return 0;
        }

        /* got it.  now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
        return 1;
}
#endif /* CONFIG_DEBUG_RWLOCK */

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->counter < 0;
}
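
/* Illustrative use (names are hypothetical; normal code goes through the
 * generic read_lock()/write_lock() wrappers rather than the _raw_ ones):
 *
 *      static rwlock_t my_rwlock = RW_LOCK_UNLOCKED;
 *
 *      _raw_read_lock(&my_rwlock);
 *      ... read shared data; other readers may run concurrently ...
 *      _raw_read_unlock(&my_rwlock);
 *
 *      _raw_write_lock(&my_rwlock);
 *      ... modify shared data; all readers and writers are excluded ...
 *      _raw_write_unlock(&my_rwlock);
 */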

#endif /* __ASM_SPINLOCK_H */