#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere in the
 * system to hold the lock at a time.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * These are fair FIFO ticket locks.  The byte-wide variant below handles
 * up to 256 CPUs; a 16-bit variant is used for larger NR_CPUS.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
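
/*
 * For reference, the lock types from asm/spinlock_types.h are assumed here
 * to be simple wrappers around a single word, roughly:
 *
 *        typedef struct {
 *                unsigned int slock;
 *        } raw_spinlock_t;
 *
 *        typedef struct {
 *                unsigned int lock;
 *        } raw_rwlock_t;
 */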

#ifdef CONFIG_X86_32
typedef char _slock_t;
# define LOCK_INS_DEC "decb"
# define LOCK_INS_XCH "xchgb"
# define LOCK_INS_MOV "movb"
# define LOCK_INS_CMP "cmpb"
# define LOCK_PTR_REG "a"
#else
typedef int _slock_t;
# define LOCK_INS_DEC "decl"
# define LOCK_INS_XCH "xchgl"
# define LOCK_INS_MOV "movl"
# define LOCK_INS_CMP "cmpl"
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && \
        (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
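
/*
 * Net effect: with UNLOCK_LOCK_PREFIX defined to LOCK_PREFIX, the unlock
 * paths below become "lock ; incb" / "lock ; incw" instead of a plain,
 * unlocked increment.
 */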

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much difference between them in performance though, especially as locks are
 * out of line.
 */
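/*
 * Illustrative sketch (not part of the kernel API): the same ticket scheme
 * written as plain C with GCC atomic builtins, assuming little-endian x86
 * and the 16-bit layout used below (head in the low byte, tail in the high
 * byte):
 *
 *        void ticket_lock(unsigned short *slock)
 *        {
 *                // take a ticket: the old tail (high byte) is our number
 *                unsigned short old = __sync_fetch_and_add(slock, 0x0100);
 *                unsigned char ticket = old >> 8;
 *
 *                // spin until the head (low byte) reaches our ticket
 *                while (*(volatile unsigned char *)slock != ticket)
 *                        ;        // cpu_relax()/pause in real code
 *        }
 *
 *        void ticket_unlock(unsigned short *slock)
 *        {
 *                // advance the head; the next ticket holder proceeds
 *                __sync_fetch_and_add((unsigned char *)slock, 1);
 *        }
 */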
#if (NR_CPUS < 256)
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = *(volatile signed int *)(&(lock)->slock);

        return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = *(volatile signed int *)(&(lock)->slock);

        return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}
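
/*
 * For example, with byte-sized tickets: slock == 0x0303 means head == tail
 * == 3, i.e. unlocked; 0x0403 means ticket 4 has been handed out while 3 is
 * still being served, i.e. locked but uncontended; 0x0603 means two further
 * CPUs are queued behind the owner, so the lock is contended.
 */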

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        short inc = 0x0100;

        asm volatile (
                LOCK_PREFIX "xaddw %w0, %1\n"   /* take a ticket: old head/tail returned in inc */
                "1:\t"
                "cmpb %h0, %b0\n\t"             /* head == our ticket? */
                "je 2f\n\t"                     /* yes: lock acquired */
                "rep ; nop\n\t"                 /* pause while spinning */
                "movb %1, %b0\n\t"              /* reload the current head byte */
                /* don't need lfence here, because loads are in-order */
                "jmp 1b\n"
                "2:"
                : "+Q" (inc), "+m" (lock->slock)
                :
                : "memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        short new;

        asm volatile("movw %2,%w0\n\t"          /* load head/tail */
                     "cmpb %h0,%b0\n\t"         /* head == tail, i.e. unlocked? */
                     "jne 1f\n\t"               /* no: give up without writing */
                     "movw %w0,%w1\n\t"
                     "incb %h1\n\t"             /* new value with tail + 1 */
                     "lock ; cmpxchgw %w1,%2\n\t" /* install it if still unchanged */
                     "1:"
                     "sete %b1\n\t"             /* ZF set => we got the lock */
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"       /* advance the head */
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#else
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
        int tmp = *(volatile signed int *)(&(lock)->slock);

        return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
        int tmp = *(volatile signed int *)(&(lock)->slock);

        return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        int inc = 0x00010000;
        int tmp;

        asm volatile("lock ; xaddl %0, %1\n"    /* take a ticket: old head/tail in inc */
                     "movzwl %w0, %2\n\t"       /* tmp = head at the time of the xadd */
                     "shrl $16, %0\n\t"         /* inc = our ticket (the old tail) */
                     "1:\t"
                     "cmpl %0, %2\n\t"          /* has the head reached our ticket? */
                     "je 2f\n\t"
                     "rep ; nop\n\t"            /* pause while spinning */
                     "movzwl %1, %2\n\t"        /* reload the current head */
                     /* don't need lfence here, because loads are in-order */
                     "jmp 1b\n"
                     "2:"
                     : "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
                     :
                     : "memory", "cc");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        int tmp;
        int new;

        asm volatile("movl %2,%0\n\t"           /* load head/tail */
                     "movl %0,%1\n\t"
                     "roll $16, %0\n\t"         /* swap the halves for the compare */
                     "cmpl %0,%1\n\t"           /* equal only if head == tail (unlocked) */
                     "jne 1f\n\t"
                     "addl $0x00010000, %1\n\t" /* new value with tail + 1 */
                     "lock ; cmpxchgl %1,%2\n\t" /* install it if still unchanged */
                     "1:"
                     "sete %b1\n\t"             /* ZF set => we got the lock */
                     "movzbl %b1,%0\n\t"
                     : "=&a" (tmp), "=r" (new), "+m" (lock->slock)
                     :
                     : "memory", "cc");

        return tmp;
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"       /* advance the head */
                     : "+m" (lock->slock)
                     :
                     : "memory", "cc");
}
#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
        while (__raw_spin_is_locked(lock))
                cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
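
/*
 * Illustrative sketch (not part of the kernel API): RW_LOCK_BIAS comes from
 * asm/rwlock.h.  The counter starts at the bias when unlocked; each reader
 * subtracts 1 and a writer subtracts the whole bias, so N readers leave
 * RW_LOCK_BIAS - N and a writer leaves 0.  Roughly, in plain C with GCC
 * atomic builtins, the lock paths behave like the following (the out-of-line
 * __read_lock_failed/__write_lock_failed helpers provide the retry loop):
 *
 *        void read_lock(int *lock)
 *        {
 *                // negative result => a writer holds or wants the lock
 *                while (__sync_sub_and_fetch(lock, 1) < 0) {
 *                        __sync_add_and_fetch(lock, 1);        // back out
 *                        while (*(volatile int *)lock <= 0)
 *                                ;                             // cpu_relax()
 *                }
 *        }
 *
 *        void write_lock(int *lock)
 *        {
 *                // zero result => no readers and no other writer
 *                while (__sync_sub_and_fetch(lock, RW_LOCK_BIAS) != 0) {
 *                        __sync_add_and_fetch(lock, RW_LOCK_BIAS); // back out
 *                        while (*(volatile int *)lock != RW_LOCK_BIAS)
 *                                ;                             // cpu_relax()
 *                }
 *        }
 */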

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
        return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
        return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"    /* one more reader */
                     "jns 1f\n"                         /* non-negative: got it */
                     "call __read_lock_failed\n\t"      /* otherwise take the slow path */
                     "1:\n"
                     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"    /* claim the whole bias */
                     "jz 1f\n"                          /* zero: no readers or writers */
                     "call __write_lock_failed\n\t"     /* otherwise take the slow path */
                     "1:\n"
                     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);              /* undo the speculative decrement */
        return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);        /* undo: readers or a writer are present */
        return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        asm volatile(LOCK_PREFIX "addl %1, %0"
                     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif