#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#include <linux/types.h>

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)		{ (i) }
static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
static __inline__ void atomic_add(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_add\n\
	add	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
static __inline__ int atomic_add_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static __inline__ void atomic_sub(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3		# atomic_sub\n\
	subf	%0,%2,%0\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
static __inline__ int atomic_sub_return(int a, atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_inc_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
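
/*
 * Illustrative sketch only, not part of the original header: a typical use of
 * atomic_inc_and_test() is advancing a counter that was initialised to a
 * negative value and acting once it reaches zero.  The helper and variable
 * names below are hypothetical.
 */
static __inline__ int example_arrival_is_last(atomic_t *pending)
{
	/* pending starts at -(number of waiters); the last arrival sees 0 */
	return atomic_inc_and_test(pending);
}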
static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
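
/*
 * Illustrative sketch only, not part of the original header: atomic_cmpxchg()
 * returns the value that was in the counter before the exchange, so a
 * lock-free read-modify-write is usually written as a retry loop.  The helper
 * name is hypothetical.
 */
static __inline__ int example_atomic_double(atomic_t *v)
{
	int old, new;

	do {
		old = atomic_read(v);
		new = old * 2;
	} while (atomic_cmpxchg(v, old, new) != old);

	return new;
}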
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
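
/*
 * Illustrative sketch only, not part of the original header: the classic
 * consumer of atomic_add_unless() is atomic_inc_not_zero() above, used to
 * take a reference only while the object is still live.  The helper name is
 * hypothetical.
 */
static __inline__ int example_try_get(atomic_t *refcount)
{
	/* Returns non-zero if the count was raised, zero if it had hit 0. */
	return atomic_inc_not_zero(refcount);
}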
#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
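
/*
 * Illustrative sketch only, not part of the original header: because
 * atomic_dec_if_positive() returns the old value minus one whether or not it
 * decremented, a negative return value means nothing was taken.  The helper
 * and variable names are hypothetical.
 */
static __inline__ int example_take_token(atomic_t *tokens)
{
	/* >= 0: a token was consumed; < 0: the pool was already empty */
	return atomic_dec_if_positive(tokens) >= 0;
}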
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()
#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
static __inline__ void atomic64_add(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_add\n\
	add	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
static __inline__ long atomic64_add_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_add_return\n\
	add	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
static __inline__ void atomic64_sub(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%3		# atomic64_sub\n\
	subf	%0,%2,%0\n\
	stdcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (a), "r" (&v->counter)
	: "cc");
}
static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_sub_return\n\
	subf	%0,%1,%0\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (a), "r" (&v->counter)
	: "cc", "memory");

	return t;
}
static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_inc_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_inc_return\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%1 \n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
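
/*
 * Illustrative sketch only, not part of the original header: same pattern as
 * the 32-bit atomic_inc_and_test() example above, but for a 64-bit counter.
 * The helper and variable names are hypothetical.
 */
static __inline__ int example64_arrival_is_last(atomic64_t *pending)
{
	/* pending starts at -(number of waiters); the last arrival sees 0 */
	return atomic64_inc_and_test(pending);
}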
static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
static __inline__ long atomic64_dec_return(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_return\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ACQUIRE_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
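
/*
 * Illustrative sketch only, not part of the original header: used the same
 * way as the 32-bit variant, e.g. consuming from a 64-bit credit counter
 * without letting it go negative.  The helper and variable names are
 * hypothetical.
 */
static __inline__ int example64_take_credit(atomic64_t *credits)
{
	/* >= 0: one credit consumed; < 0: none were available */
	return atomic64_dec_if_positive(credits) >= 0;
}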
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_RELEASE_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ACQUIRE_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
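
/*
 * Illustrative sketch only, not part of the original header: mirrors the
 * 32-bit atomic_add_unless() example, e.g. charging a 64-bit quota counter
 * only while it has not been frozen at a sentinel value.  The helper name,
 * parameter names and the -1 sentinel are hypothetical.
 */
static __inline__ int example64_charge(atomic64_t *quota, long amount)
{
	/* Non-zero: quota was charged; zero: counter was frozen at -1 */
	return atomic64_add_unless(quota, amount, -1L);
}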
#else  /* __powerpc64__ */
#include <asm-generic/atomic64.h>

#endif /* __powerpc64__ */

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */