/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

#define atomic_read(v)	((v)->counter)
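/*
 * atomic_read() is a plain load: an aligned 32-bit load is naturally
 * atomic on ARM.  ATOMIC_INIT() is for static initialisation, e.g.
 * static atomic_t active = ATOMIC_INIT(0);
 */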
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
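/*
 * The *_return and cmpxchg operations must act as full memory barriers,
 * hence the smp_mb() before and after the exclusive sequence below.
 * Plain atomic_add()/atomic_sub() make no ordering guarantees.
 */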
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
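/*
 * atomic_cmpxchg(): strexeq only attempts the store when the loaded
 * value matched 'old'; a non-zero 'res' means the exclusive store was
 * disturbed and the loop retries.  The old value is returned either way.
 */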
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
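/*
 * atomic_clear_mask() works on a plain word rather than an atomic_t:
 * it atomically clears the bits set in 'mask' from *addr ('bic' is
 * bit-clear), looping until the exclusive store succeeds.
 */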
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define atomic_set(v,i)	(((v)->counter) = (i))
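/*
 * Pre-ARMv6 cores have no ldrex/strex, so these fallbacks are only safe
 * on uniprocessor systems (hence the CONFIG_SMP #error above) and rely
 * on disabling interrupts around each read-modify-write.
 */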
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}
#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
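/*
 * atomic_add_unless(): add 'a' to v unless v is already 'u'.  Returns
 * non-zero if v was not 'u'.  Built on a cmpxchg loop that re-reads the
 * counter until the compare-and-swap succeeds or the value reaches 'u'.
 */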
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
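/*
 * The non-return atomic ops above imply no barrier, so the
 * smp_mb__{before,after}_atomic_{inc,dec}() hooks expand to a full
 * smp_mb().
 */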
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif
#endif