#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <asm/barrier.h>
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
/*
 * Counter is volatile to make sure gcc doesn't try to be clever
 * and move things around on us. We need to use _exactly_ the address
 * the user gave us, not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile long counter; } atomic64_t;

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		((v)->counter + 0)
#define atomic64_read(v)	((v)->counter + 0)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
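/*
 * Illustrative usage sketch, not part of this header; the variable and
 * function names are hypothetical.  A counter is declared with
 * ATOMIC_INIT() and accessed only through the wrappers above, never
 * through its counter field directly:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void example_attach(void)
 *	{
 *		atomic_inc(&nr_users);
 *	}
 *
 *	int example_in_use(void)
 *	{
 *		return atomic_read(&nr_users) != 0;
 *	}
 */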
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
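/*
 * All four routines above follow the same load-locked/store-conditional
 * pattern: ldl_l/ldq_l loads the counter and arms the lock flag, the add
 * or subtract is done in a register, and stl_c/stq_c succeeds only if no
 * other CPU wrote the location in the meantime.  On failure the
 * out-of-line "2:" stub (placed in .subsection 2, per the comment above)
 * branches back to "1:" and the operation is retried.  Roughly, as a
 * C-level sketch (load_locked/store_conditional are illustrative names,
 * not real interfaces):
 *
 *	do {
 *		temp = load_locked(&v->counter);
 *		temp += i;
 *	} while (!store_conditional(&v->counter, temp));
 */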
/*
 * Same as above, but return the result value
 */
static __inline__ long atomic_add_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
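/*
 * Unlike the plain add/sub routines, the *_return variants above bracket
 * the ll/sc sequence with smp_mb(), so they also act as full memory
 * barriers.  Hypothetical sketch of code relying on that ordering
 * (obj/release_obj are illustrative names):
 *
 *	obj->data = val;
 *	if (atomic_dec_return(&obj->count) == 0)
 *		release_obj(obj);
 */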
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
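/*
 * Illustrative sketch (hypothetical struct and function names):
 * atomic_inc_not_zero() is the usual way to take a reference on an
 * object whose count may concurrently drop to zero; a failed increment
 * means the object is already being torn down.
 *
 *	struct obj *obj_get(struct obj *obj)
 *	{
 *		if (!atomic_inc_not_zero(&obj->refcnt))
 *			return NULL;
 *		return obj;
 *	}
 */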
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
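/*
 * Illustrative sketch (hypothetical names): the classic reference-drop
 * idiom built on atomic_dec_and_test(); exactly one caller observes the
 * transition to zero and performs the teardown.
 *
 *	void obj_put(struct obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);
 *	}
 */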
#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
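/*
 * atomic_inc()/atomic_dec() themselves imply no ordering, so callers that
 * need it bracket the operation with the macros above.  Hypothetical
 * sketch: make a plain store visible before the decrement can be observed
 * (obj, STATE_DONE and pending are illustrative names).
 *
 *	obj->state = STATE_DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */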
#endif /* _ALPHA_ATOMIC_H */