#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>

extern int printk(const char * fmt, ...)
        __attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC          0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT     , SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT     /* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

/*
 * Simple spin lock operations.  There are two variants, one that clears
 * IRQs on the local processor and one that does not.
 *
 * We make no fairness assumptions: fairness guarantees would have a cost.
 */

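/*
 * The lock word is used as a signed byte: 1 means unlocked, 0 means held,
 * and negative values mean held with other CPUs spinning on it, which is
 * why spin_is_locked() tests <= 0.  A rough, illustrative C equivalent of
 * the acquire loop encoded by spin_lock_string below (not real code;
 * atomic_decrement_byte() is only a stand-in for "lock ; decb") would be:
 *
 *      while (atomic_decrement_byte(&lock->lock) < 0)
 *              while ((signed char)lock->lock <= 0)
 *                      cpu_relax();
 */
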
#define spin_is_locked(x)       (*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#define spin_lock_string \
        "\n1:\t" \
        "lock ; decb %0\n\t" \
        "js 2f\n" \
        LOCK_SECTION_START("") \
        "2:\t" \
        "rep;nop\n\t" \
        "cmpb $0,%0\n\t" \
        "jle 2b\n\t" \
        "jmp 1b\n" \
        LOCK_SECTION_END

/*
 * Releasing the lock with a plain byte store works on x86, despite all
 * the confusion, because stores are not reordered with earlier memory
 * operations.  The exceptions are PPro SMP (errata 66, 92) and kernels
 * built with CONFIG_X86_OOSTORE, which need the locked xchg variant below.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
        "movb $1,%0" \
                :"=m" (lock->lock) : : "memory"


static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        assert_spin_locked(lock);
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#else

#define spin_unlock_string \
        "xchgb %b0, %1" \
                :"=q" (oldval), "=m" (lock->lock) \
                :"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
        char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(lock->magic != SPINLOCK_MAGIC);
        assert_spin_locked(lock);
#endif
        __asm__ __volatile__(
                spin_unlock_string
        );
}

#endif

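/*
 * _raw_spin_trylock() exchanges 0 into the lock byte.  If the old value
 * was positive (1, i.e. unlocked), the exchange acquired the lock; if it
 * was already 0 or negative, storing 0 over it leaves the lock held by
 * its current owner and any waiters keep spinning until unlock writes 1.
 */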
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        char oldval;
        __asm__ __volatile__(
                "xchgb %b0,%1"
                :"=q" (oldval), "=m" (lock->lock)
                :"0" (0) : "memory");
        return oldval > 0;
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        if (lock->magic != SPINLOCK_MAGIC) {
                printk("eip: %p\n", __builtin_return_address(0));
                BUG();
        }
#endif
        __asm__ __volatile__(
                spin_lock_string
                :"=m" (lock->lock) : : "memory");
}

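/*
 * These _raw_* primitives do not touch preemption or interrupts; the
 * generic spin_lock()/spin_unlock() wrappers in <linux/spinlock.h> are
 * expected to disable preemption (and, for the _irq/_irqsave variants,
 * local interrupts) around them.  A minimal usage sketch, assuming the
 * generic wrappers of this kernel generation (my_lock is only an
 * illustrative name, not part of this header):
 *
 *      spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock(&my_lock);     // preempt_disable(); _raw_spin_lock()
 *      // ... critical section ...
 *      spin_unlock(&my_lock);   // _raw_spin_unlock(); preempt_enable()
 */
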
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
        unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC            0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT       , RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT       /* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define read_can_lock(x) ((int)(x)->lock > 0)
#define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores. See
 * semaphore.h for details. -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */

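/*
 * To make the counter scheme concrete: RW_LOCK_BIAS comes from
 * <asm/rwlock.h> (a large positive constant, 0x01000000 in this kernel
 * generation; stated here only as an illustrative reading of that header).
 * Starting from lock == RW_LOCK_BIAS (unlocked):
 *
 *      read_lock   subtracts 1            -> stays non-negative as long as
 *                                            no writer holds the lock
 *      write_lock  subtracts RW_LOCK_BIAS -> reaches exactly 0 only if
 *                                            there were no readers or writer
 *
 * Any subtraction that drives the counter negative sets the sign bit,
 * which is the "contended" case handled by __read_lock_failed and
 * __write_lock_failed.
 */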
static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
        __build_write_lock(rw, "__write_lock_failed");
}

#define _raw_read_unlock(rw)    asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)   asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

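/*
 * The trylock variants below apply the same bias arithmetic in plain C:
 * speculatively subtract (1 for a reader, RW_LOCK_BIAS for a writer),
 * check whether the result still indicates success, and add the amount
 * back if it does not - an optimistic subtract-then-undo pattern rather
 * than a single compare-and-swap.
 */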
static inline int _raw_read_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        atomic_dec(count);
        if (atomic_read(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int _raw_write_trylock(rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}

#endif /* __ASM_SPINLOCK_H */