#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 *  include/asm-s390/atomic.h
 *
 *  S390 version
 *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow,
 *               Arnd Bergmann (arndb@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 * S390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */
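
/*
 * Illustrative sketch (editorial addition, not part of this header):
 * the CS-based loops below behave like the following C pseudo-code,
 * where compare_and_swap() is a hypothetical primitive that stores
 * 'new' at *ptr iff *ptr still equals 'old' and returns the value it
 * found there:
 *
 *      old = *ptr;
 *      for (;;) {
 *              new = old OP operand;
 *              prev = compare_and_swap(ptr, old, new);
 *              if (prev == old)
 *                      break;          (store succeeded)
 *              old = prev;             (lost a race; retry)
 *      }
 */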

typedef struct {
        volatile int counter;
} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i)  { (i) }
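
/*
 * Example (illustrative only; foo_count is a hypothetical name):
 * declaring a statically initialized atomic counter.
 *
 *      static atomic_t foo_count = ATOMIC_INIT(0);
 */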

#ifdef __KERNEL__

#define __CS_LOOP(ptr, op_val, op_string) ({                            \
        typeof(ptr->counter) old_val, new_val;                          \
        __asm__ __volatile__("   l     %0,0(%3)\n"                      \
                             "0: lr    %1,%0\n"                         \
                             op_string "  %1,%4\n"                      \
                             "   cs    %0,%1,0(%3)\n"                   \
                             "   jl    0b"                              \
                             : "=&d" (old_val), "=&d" (new_val),        \
                               "=m" (((atomic_t *)(ptr))->counter)      \
                             : "a" (ptr), "d" (op_val),                 \
                               "m" (((atomic_t *)(ptr))->counter)       \
                             : "cc", "memory" );                        \
        new_val;                                                        \
})
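
/*
 * Usage sketch (illustrative only): __CS_LOOP(v, i, "ar") atomically
 * adds i to v->counter with the "add register" instruction and yields
 * the new counter value, e.g.
 *
 *      int new = __CS_LOOP(v, 1, "ar");        (new == incremented value)
 *
 * The atomic_* helpers below are thin wrappers around this macro.
 */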
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v,i)         (((v)->counter) = (i))

static __inline__ int atomic_add_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)              atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add_return(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
        return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)              atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub_return(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)
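
/*
 * Typical caller pattern (illustrative only; struct foo and
 * foo_destroy() are hypothetical): reference counting built on the
 * helpers above.
 *
 *      void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_test(&f->refcnt))
 *                      foo_destroy(f);
 *      }
 */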

static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, ~mask, "nr");
}

static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
{
        __CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        __asm__ __volatile__("  cs   %0,%3,0(%2)\n"
                             : "+d" (old), "=m" (v->counter)
                             : "a" (v), "d" (new), "m" (v->counter)
                             : "cc", "memory" );
        return old;
}
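
/*
 * Typical caller pattern (illustrative only; FOO_LIMIT is
 * hypothetical): an open-coded read-modify-write built on
 * atomic_cmpxchg(), here incrementing while clamping at a limit.
 *
 *      int old, new;
 *
 *      do {
 *              old = atomic_read(v);
 *              new = old < FOO_LIMIT ? old + 1 : old;
 *      } while (atomic_cmpxchg(v, old, new) != old);
 */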

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                c = old;
        return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
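
/*
 * Typical caller pattern (illustrative only; obj is hypothetical):
 * take a reference in a lookup path only while the count is non-zero,
 * i.e. only if the object has not already hit its final
 * atomic_dec_and_test().
 *
 *      if (!atomic_inc_not_zero(&obj->refcnt))
 *              return NULL;
 */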

#undef __CS_LOOP

#ifdef __s390x__
typedef struct {
        volatile long long counter;
} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i)  { (i) }

#define __CSG_LOOP(ptr, op_val, op_string) ({                           \
        typeof(ptr->counter) old_val, new_val;                          \
        __asm__ __volatile__("   lg    %0,0(%3)\n"                      \
                             "0: lgr   %1,%0\n"                         \
                             op_string "  %1,%4\n"                      \
                             "   csg   %0,%1,0(%3)\n"                   \
                             "   jl    0b"                              \
                             : "=&d" (old_val), "=&d" (new_val),        \
                               "=m" (((atomic64_t *)(ptr))->counter)    \
                             : "a" (ptr), "d" (op_val),                 \
                               "m" (((atomic64_t *)(ptr))->counter)     \
                             : "cc", "memory" );                        \
        new_val;                                                        \
})
#define atomic64_read(v)          ((v)->counter)
#define atomic64_set(v,i)         (((v)->counter) = (i))

static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "agr");
}
#define atomic64_add(_i, _v)            atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)

static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
{
        return __CSG_LOOP(v, i, "sgr");
}
#define atomic64_sub(_i, _v)            atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)

static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, ~mask, "ngr");
}

static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
{
        __CSG_LOOP(v, mask, "ogr");
}

static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
                                             long long old, long long new)
{
        __asm__ __volatile__("  csg  %0,%3,0(%2)\n"
                             : "+d" (old), "=m" (v->counter)
                             : "a" (v), "d" (new), "m" (v->counter)
                             : "cc", "memory" );
        return old;
}

static __inline__ int atomic64_add_unless(atomic64_t *v,
                                          long long a, long long u)
{
        long long c, old;

        c = atomic64_read(v);
        while (c != u && (old = atomic64_cmpxchg(v, c, c + a)) != c)
                c = old;
        return c != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#undef __CSG_LOOP
#endif /* __s390x__ */

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
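
/*
 * Caller-side sketch (illustrative only; obj is hypothetical): the
 * barrier helpers above order ordinary memory accesses against an
 * adjacent atomic operation.
 *
 *      obj->done = 1;
 *      smp_mb__before_atomic_dec();
 *      atomic_dec(&obj->pending);
 */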

#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__  */