[S390] incorrect placement of include.
[linux-2.6] include/asm-s390/atomic.h
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */
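/*
 * Typical use is reference counting; an illustrative sketch only, with
 * a hypothetical free_object():
 *
 *      static atomic_t refcount = ATOMIC_INIT(1);
 *
 *      atomic_inc(&refcount);                  take a reference
 *      if (atomic_dec_and_test(&refcount))     last reference gone
 *              free_object();
 */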

typedef struct {
        volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
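/*
 * The aligned(4) attribute above is required for correctness: the cs
 * instruction used below demands a word aligned second operand.
 */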
#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
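/*
 * __CS_LOOP is the classic s390 compare-and-swap retry loop: load the
 * counter, apply op_string to a copy, then cs the result back.  If
 * another CPU modified the counter in the meantime, cs fails (condition
 * code 1) and jl branches back to retry with the freshly loaded value.
 */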

#define __CS_LOOP(ptr, op_val, op_string) ({                            \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic_t *)(ptr))->counter)                   \
                : "d" (op_val),  "Q" (((atomic_t *)(ptr))->counter)     \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#else /* __GNUC__ */
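/*
 * Fallback for older compilers (gcc before 3.3, per the #if above),
 * which cannot be trusted with the "Q" memory constraint here: the
 * address is passed in a register ("a" constraint) and the counter is
 * addressed explicitly as 0(%3).
 */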

#define __CS_LOOP(ptr, op_val, op_string) ({                            \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       l       %0,0(%3)\n"                             \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%4\n"                                \
                "       cs      %0,%1,0(%3)\n"                          \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=m" (((atomic_t *)(ptr))->counter)                   \
                : "a" (ptr), "d" (op_val),                              \
                  "m" (((atomic_t *)(ptr))->counter)                    \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#endif /* __GNUC__ */

#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))
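/*
 * atomic_read() and atomic_set() above are plain (volatile) accesses;
 * they provide no barrier or serialization of their own.
 */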

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add_return(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub_return(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, mask, "or");
}
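/*
 * atomic_clear_mask() above ANDs the complement of mask into the
 * counter (nr), atomic_set_mask() ORs the mask bits in (or); both
 * reuse the same compare-and-swap retry loop and return nothing.
 */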

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
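/*
 * atomic_xchg() above simply delegates to the architecture's xchg()
 * primitive.  atomic_cmpxchg() below is a single cs: compare the
 * counter with old, store new if they match, and return the value
 * actually found; on a mismatch cs loads the current counter value
 * into the "+d" (old) operand.
 */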

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       cs      %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       cs      %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}
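/*
 * atomic_add_unless() below adds a to v unless the counter equals u and
 * returns non-zero if the add was performed.  The classic use is
 * atomic_inc_not_zero() to take a reference only while an object is
 * still live; a sketch with a hypothetical lookup_object():
 *
 *      obj = lookup_object(key);
 *      if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *              obj = NULL;     refcount already hit zero
 */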

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#ifdef __s390x__
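/*
 * 64-bit counterparts of the operations above, built on csg.  The
 * aligned(8) below is again mandatory: csg requires a doubleword
 * aligned operand.
 */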
typedef struct {
        volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)  { (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
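/*
 * __CSG_LOOP mirrors __CS_LOOP above, using the 64-bit instructions
 * lg/lgr/csg.
 */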

#define __CSG_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic64_t *)(ptr))->counter)                 \
                : "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)    \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
        asm volatile(                                                   \
                "       lg      %0,0(%3)\n"                             \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%4\n"                                \
                "       csg     %0,%1,0(%3)\n"                          \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=m" (((atomic64_t *)(ptr))->counter)                 \
                : "a" (ptr), "d" (op_val),                              \
                  "m" (((atomic64_t *)(ptr))->counter)                  \
                : "cc", "memory");                                      \
        new_val;                                                        \
})

#endif /* __GNUC__ */

#define atomic64_read(v)          ((v)->counter)
#define atomic64_set(v,i)         (((v)->counter) = (i))

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, mask, "ogr");
}

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
                                             long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
        asm volatile(
                "       csg     %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
#else /* __GNUC__ */
        asm volatile(
                "       csg     %0,%3,0(%2)"
                : "+d" (old), "=m" (v->counter)
                : "a" (v), "d" (new), "m" (v->counter)
                : "cc", "memory");
#endif /* __GNUC__ */
        return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
                                          long long a, long long u)
{
        long long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
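/*
 * Callers that need ordering around an atomic_inc()/atomic_dec() use
 * the hooks above; on s390 they are full smp_mb() barriers.
 */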
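/*
 * asm-generic/atomic.h supplies the atomic_long_t interface on top of
 * the 32/64-bit operations defined above.
 */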
#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */