linux-2.6: arch/x86/include/asm/cmpxchg_64.h
#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
                                                 (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))

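/*
 * On x86-64 a naturally aligned 64-bit store is already atomic, so a
 * plain assignment is all that is needed here (the 32-bit variant of
 * this header has to emulate this with cmpxchg8b).
 */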
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
        *ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies a locked
 * bus cycle anyway.
 * Note 2: xchg has a side effect, so the "volatile" qualifier is
 *         necessary.  Strictly speaking the asm is not quite valid as
 *         written, because *ptr is modified but only listed as an
 *         input operand. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
                                   int size)
{
        switch (size) {
        case 1:
                asm volatile("xchgb %b0,%1"
                             : "=q" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 2:
                asm volatile("xchgw %w0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 4:
                asm volatile("xchgl %k0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        case 8:
                asm volatile("xchgq %0,%1"
                             : "=r" (x)
                             : "m" (*__xg(ptr)), "0" (x)
                             : "memory");
                break;
        }
        return x;
}
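
/*
 * Illustrative sketch, not part of the original header: xchg() is
 * typically used to swap in a new value atomically and act on the old
 * one, e.g. to hand off a pointer between CPUs.  The function and
 * variable names below are hypothetical.
 */
#if 0
static void *publish_work(void **slot, void *new_work)
{
        /*
         * Atomically install new_work; the return value is whatever
         * the slot held before, which the caller may need to free.
         */
        return xchg(slot, new_work);
}
#endif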

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 8:
                asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}
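
/*
 * Illustrative sketch, not part of the original header: the canonical
 * cmpxchg() retry loop.  Re-read the current value, compute the update,
 * and retry if another CPU modified the location in between.  The names
 * below are hypothetical.
 */
#if 0
static unsigned long counter_add(unsigned long *ctr, unsigned long delta)
{
        unsigned long old, seen;

        do {
                old = *ctr;
                seen = cmpxchg(ctr, old, old + delta);
        } while (seen != old);          /* lost a race, try again */

        return old + delta;
}
#endif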

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
                                           unsigned long old,
                                           unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile("lock; cmpxchgl %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}
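
/*
 * Illustrative sketch, not part of the original header: sync_cmpxchg()
 * forces the lock prefix even on !SMP builds, for locations a
 * hypervisor may write concurrently.  The names below are hypothetical.
 */
#if 0
static int claim_shared_slot(unsigned int *slot_state)
{
        /*
         * Transition FREE (0) -> CLAIMED (1); fail if someone else,
         * possibly the hypervisor, got there first.
         */
        return sync_cmpxchg(slot_state, 0, 1) == 0;
}
#endif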

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                asm volatile("cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 2:
                asm volatile("cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 4:
                asm volatile("cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        case 8:
                asm volatile("cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__xg(ptr)), "0"(old)
                             : "memory");
                return prev;
        }
        return old;
}
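
/*
 * Illustrative sketch, not part of the original header: cmpxchg_local()
 * omits the lock prefix, so it is only safe for data no other CPU can
 * touch concurrently, e.g. a per-CPU counter updated with preemption
 * disabled.  It remains atomic with respect to interrupts on the local
 * CPU.  The names below are hypothetical.
 */
#if 0
static void percpu_event_count(unsigned long *my_cpu_ctr)
{
        unsigned long old;

        do {
                old = *my_cpu_ctr;
        } while (cmpxchg_local(my_cpu_ctr, old, old + 1) != old);
}
#endif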

#define cmpxchg(ptr, o, n)                                              \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),       \
                                       (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n)                                            \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg((ptr), (o), (n));                                       \
})
#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                                             (unsigned long)(n),        \
                                             sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)                                         \
        ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),  \
                                            (unsigned long)(n),         \
                                            sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)                                      \
({                                                                      \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
})
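
/*
 * Illustrative sketch, not part of the original header: on 64-bit,
 * cmpxchg64() is just cmpxchg() plus a compile-time check that the
 * operand really is 8 bytes wide.  The variables below are
 * hypothetical.
 */
#if 0
static void cmpxchg64_example(void)
{
        unsigned long long seq = 0;
        unsigned int small = 0;

        /* OK: the operand is 8 bytes, compiles to a locked cmpxchgq. */
        cmpxchg64(&seq, 0ULL, 1ULL);

        /*
         * Would not compile: BUILD_BUG_ON(sizeof(*(ptr)) != 8) fires.
         * cmpxchg64(&small, 0, 1);
         */
        (void)small;
}
#endif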

#endif /* _ASM_X86_CMPXCHG_64_H */