1 #ifndef __ASM_CMPXCHG_H
2 #define __ASM_CMPXCHG_H
4 #include <linux/bitops.h> /* for LOCK_PREFIX */
 * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
/*
 * xchg(ptr, v): atomically exchange *ptr with v and return the old
 * value of *ptr.  Dispatches on sizeof(*(ptr)) to the right-sized
 * XCHG instruction via __xchg().
 */
#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

/*
 * __xg() launders a pointer through a dummy struct pointer so the "m"
 * constraints in the asm below refer to the pointed-to object itself.
 * NOTE(review): struct __xchg_dummy is declared outside this excerpt —
 * confirm it is defined nearby in the full file.
 */
#define __xg(x) ((struct __xchg_dummy *)(x))
 * The semantics of CMPXCHG8B are a bit strange, this is why
21 * there is a loop and the loading of %%eax and %%edx has to
22 * be inside. This inlines well in most cases, the cached
23 * cost is around ~38 cycles. (in the future we might want
24 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
25 * might have an implicit FPU-save as a cost, so it's not
26 * clear which path to go.)
28 * cmpxchg8b must be used with the lock prefix here to allow
29 * the instruction to be executed atomically, see page 3-102
30 * of the instruction set reference 24319102.pdf. We need
31 * the reader side to see the coherent 64bit value.
/*
 * Atomically store the 64-bit value high:low to *ptr, so that readers
 * always observe a coherent 64-bit value (see the comment above about
 * the lock prefix on cmpxchg8b).
 * NOTE(review): the asm statement head, the retry label/branch and the
 * operand lists binding ptr/low/high are missing from this excerpt.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
	"movl (%0), %%eax\n\t"		/* load current low word into eax */
	"movl 4(%0), %%edx\n\t"		/* load current high word into edx */
	LOCK_PREFIX "cmpxchg8b (%0)\n\t"	/* store new value if edx:eax still match */
	/* eax/edx are overwritten by the loads above, hence the clobbers */
	: "ax", "dx", "memory");
/*
 * Split a (compile-time constant) 64-bit value into its 32-bit halves
 * and store it atomically via __set_64bit().
 */
static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
/*
 * Access the low / high 32-bit halves of a 64-bit lvalue in place.
 * On x86 (little-endian) word 0 is the low half and word 1 the high
 * half, so these can be used as asm input operands without shifting.
 */
#define ll_low(x)	(((unsigned int *)&(x))[0])
#define ll_high(x)	(((unsigned int *)&(x))[1])
/*
 * Store a run-time 64-bit value atomically: pick the halves apart in
 * place with ll_low()/ll_high() and hand them to __set_64bit().
 */
static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
	__set_64bit(ptr, ll_low(value), ll_high(value));
/*
 * set_64bit(ptr, value): atomically publish a 64-bit value, choosing
 * the constant or variable helper depending on whether the compiler
 * can prove value is a compile-time constant.
 */
#define set_64bit(ptr, value) \
	(__builtin_constant_p((value)) \
	 ? __set_64bit_constant((ptr), (value)) \
	 : __set_64bit_var((ptr), (value)))

/*
 * _set_64bit: same dispatch, but expands __set_64bit() directly with
 * the low/high halves instead of going through the helper functions.
 */
#define _set_64bit(ptr, value) \
	(__builtin_constant_p(value) \
	 ? __set_64bit(ptr, (unsigned int)(value), \
		       (unsigned int)((value) >> 32)) \
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
 * strictly speaking the constraints are still imprecise, since *ptr is
 * really an output argument. --ANK
/*
 * Exchange x with *ptr, dispatching on the operand size (1, 2 or 4
 * bytes), and return the previous value of *ptr.
 * NOTE(review): the switch/case/return scaffolding and the output
 * operand lines are missing from this excerpt; only the per-size asm
 * fragments are visible.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
	/* 1-byte exchange ("b" suffix / %b0 byte register) */
	asm volatile("xchgb %b0,%1"
		     : "m" (*__xg(ptr)), "0" (x)
	/* 2-byte exchange */
	asm volatile("xchgw %w0,%1"
		     : "m" (*__xg(ptr)), "0" (x)
	/* 4-byte exchange */
	asm volatile("xchgl %0,%1"
		     : "m" (*__xg(ptr)), "0" (x)
106 * Atomic compare and exchange. Compare OLD with MEM, if identical,
107 * store NEW in MEM. Return the initial value in MEM. Success is
108 * indicated by comparing RETURN with OLD.
111 #ifdef CONFIG_X86_CMPXCHG
112 #define __HAVE_ARCH_CMPXCHG 1
/*
 * cmpxchg: locked compare-and-swap; returns the old value of *ptr
 * (compare with o to detect success).
 * NOTE(review): the trailing sizeof(*(ptr)) argument lines of these
 * three macros are missing from this excerpt.
 */
#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
				       (unsigned long)(n), \

/* sync_cmpxchg: always-locked variant for memory shared with a hypervisor. */
#define sync_cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
					    (unsigned long)(n), \

/* cmpxchg_local: CPU-local variant, no lock prefix. */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
					     (unsigned long)(n), \
127 #ifdef CONFIG_X86_CMPXCHG64
/*
 * 64-bit compare-and-swap wrappers (CMPXCHG8B); the _local variant
 * omits the lock prefix for CPU-local data.
 */
#define cmpxchg64(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
/*
 * __cmpxchg - locked compare-and-exchange on a 1/2/4-byte object:
 * if *ptr == old then store new; returns the value read from *ptr
 * (equal to old on success).
 * NOTE(review): the switch/case/return scaffolding and output operand
 * lines are missing from this excerpt.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
	/* 1 byte: "q" forces a byte-addressable register for new */
	asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
		     : "q"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * otherwise believes it is UP.
/*
 * __sync_cmpxchg - like __cmpxchg but with an unconditional "lock;"
 * prefix (not LOCK_PREFIX, which may be patched out on UP kernels),
 * for memory shared with a hypervisor.
 * NOTE(review): the switch/case/return scaffolding and output operand
 * lines are missing from this excerpt.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long new, int size)
	asm volatile("lock; cmpxchgb %b1,%2"
		     : "q"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile("lock; cmpxchgw %w1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile("lock; cmpxchgl %1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
/*
 * __cmpxchg_local - compare-and-exchange without any lock prefix:
 * safe only for data never touched by another CPU (or by interrupt
 * context racing across CPUs).
 * NOTE(review): the switch/case/return scaffolding and output operand
 * lines are missing from this excerpt.
 */
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long new, int size)
	asm volatile("cmpxchgb %b1,%2"
		     : "q"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile("cmpxchgw %w1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
	asm volatile("cmpxchgl %1,%2"
		     : "r"(new), "m"(*__xg(ptr)), "0"(old)
/*
 * __cmpxchg64 - locked 64-bit compare-and-exchange via CMPXCHG8B;
 * returns the previous value of *ptr.  New value is passed in
 * ecx:ebx as the instruction requires.
 * NOTE(review): the output operand ("=A"(prev)?) and the ptr/old
 * input operands are missing from this excerpt — confirm against the
 * full file.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
		     : "b"((unsigned long)new),		/* low half of new */
		       "c"((unsigned long)(new >> 32)),	/* high half of new */
/*
 * __cmpxchg64_local - 64-bit compare-and-exchange without the lock
 * prefix, for CPU-local data only.
 * NOTE(review): the output operand and the ptr/old input operands are
 * missing from this excerpt — confirm against the full file.
 */
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
	unsigned long long prev;
	asm volatile("cmpxchg8b %3"
		     : "b"((unsigned long)new),		/* low half of new */
		       "c"((unsigned long)(new >> 32)),	/* high half of new */
254 #ifndef CONFIG_X86_CMPXCHG
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
261 extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
262 extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
263 extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
/*
 * Run-time dispatch, by operand size, to the out-of-line 80386
 * cmpxchg emulation helpers declared above.
 * NOTE(review): the switch/case scaffolding and the fallthrough/
 * default handling are missing from this excerpt.
 */
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
	return cmpxchg_386_u8(ptr, old, new);
	return cmpxchg_386_u16(ptr, old, new);
	return cmpxchg_386_u32(ptr, old, new);
/*
 * cmpxchg with a boot-time CPU check: use the real CMPXCHG instruction
 * on 80486 and later (boot_cpu_data.x86 > 3), otherwise fall back to
 * the software emulation in cmpxchg_386().
 * NOTE(review): the statement-expression braces, the sizeof(*(ptr))
 * arguments, the else line and the final __ret are missing from this
 * excerpt.
 */
#define cmpxchg(ptr, o, n) \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 3)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
/*
 * cmpxchg_local with the same boot-time CPU check: unlocked
 * __cmpxchg_local() on 80486+, cmpxchg_386() emulation otherwise.
 * NOTE(review): the statement-expression braces, the sizeof(*(ptr))
 * arguments, the else line and the final __ret are missing from this
 * excerpt.
 */
#define cmpxchg_local(ptr, o, n) \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 3)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
307 #ifndef CONFIG_X86_CMPXCHG64
 * Building a kernel capable of running on 80386 and 80486. It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
313 extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
/*
 * cmpxchg64 with a boot-time CPU check: real CMPXCHG8B on Pentium and
 * later (boot_cpu_data.x86 > 4), otherwise the out-of-line
 * cmpxchg_486_u64() emulation.
 * NOTE(review): the statement-expression braces, the else line and the
 * final __ret are missing from this excerpt.
 */
#define cmpxchg64(ptr, o, n) \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 4)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
				(unsigned long long)(o), \
				(unsigned long long)(n)); \
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
				(unsigned long long)(o), \
				(unsigned long long)(n)); \
328 #define cmpxchg64_local(ptr, o, n) \
330 __typeof__(*(ptr)) __ret; \
331 if (likely(boot_cpu_data.x86 > 4)) \
332 __ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
333 (unsigned long long)(o), \
334 (unsigned long long)(n)); \
336 __ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
337 (unsigned long long)(o), \
338 (unsigned long long)(n)); \