/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <asm/system.h>

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

#define atomic_read(v)  ((v)->counter)

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.  Writing to 'v->counter'
 * without using the following operations WILL break the atomic
 * nature of these ops.
 */
static inline void atomic_set(atomic_t *v, int i)
{
        unsigned long tmp;

        __asm__ __volatile__("@ atomic_set\n"
        "1:     ldrex   %0, [%1]\n"
        "       strex   %0, %2, [%1]\n"
        "       teq     %0, #0\n"
        "       bne     1b"
        : "=&r" (tmp)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add_return\n"
        "1:     ldrex   %0, [%2]\n"
        "       add     %0, %0, %3\n"
        "       strex   %1, %0, [%2]\n"
        "       teq     %1, #0\n"
        "       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}
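
/*
 * Illustrative note, not part of the original header: the exclusive
 * load/store loop above behaves roughly like the C pseudo-code below.
 * 'store_exclusive' is a made-up name standing in for the strex
 * instruction, which reports failure (non-zero) if another CPU touched
 * the location since the matching ldrex, causing a retry.
 *
 *      do {
 *              old = v->counter;                       // ldrex
 *              new = old + i;                          // add
 *      } while (store_exclusive(&v->counter, new));    // strex/teq/bne
 *      return new;
 */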

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub_return\n"
        "1:     ldrex   %0, [%2]\n"
        "       sub     %0, %0, %3\n"
        "       strex   %1, %0, [%2]\n"
        "       teq     %1, #0\n"
        "       bne     1b"
        : "=&r" (result), "=&r" (tmp)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%2]\n"
                "mov    %0, #0\n"
                "teq    %1, %3\n"
                "strexeq %0, %4, [%2]\n"
                    : "=&r" (res), "=&r" (oldval)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        return oldval;
}
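
/*
 * Illustrative sketch, not part of the original header: atomic_cmpxchg()
 * returns the value it observed, so callers can build conditional updates
 * by retrying until their compare value matches.  The function name and
 * the "decrement only while positive" policy are hypothetical.
 */
static inline int example_atomic_dec_if_positive(atomic_t *v)
{
        int old, cur = atomic_read(v);

        /* Retry until our decrement wins or the counter is already <= 0. */
        while (cur > 0 && (old = atomic_cmpxchg(v, cur, cur - 1)) != cur)
                cur = old;

        return cur;     /* value observed before any successful decrement */
}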

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
        "1:     ldrex   %0, [%2]\n"
        "       bic     %0, %0, %3\n"
        "       strex   %1, %0, [%2]\n"
        "       teq     %1, #0\n"
        "       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (addr), "Ir" (mask)
        : "cc");
}
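
/*
 * Illustrative sketch, not part of the original header: atomic_clear_mask()
 * atomically clears bits in a plain word (not an atomic_t).  The flag and
 * function names below are hypothetical.
 */
#define EXAMPLE_BUSY_FLAG       (1UL << 0)      /* hypothetical bit */

static inline void example_clear_busy(unsigned long *status)
{
        /* Clear the busy bit without losing concurrent updates to *status. */
        atomic_clear_mask(EXAMPLE_BUSY_FLAG, status);
}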

#else /* ARM_ARCH_6 */

#include <asm/system.h>

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define atomic_set(v,i) (((v)->counter) = (i))

static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

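/*
 * Illustrative sketch, not part of the original header: atomic_xchg()
 * unconditionally installs a new value and hands back the old one, which
 * is handy for claiming a one-shot token.  The function name is
 * hypothetical.
 */
static inline int example_atomic_take_token(atomic_t *v)
{
        /* Returns the previous value; the counter is left at zero. */
        return atomic_xchg(v, 0);
}
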
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c != u;
}
#define atomic_inc_not_zero(v)  atomic_add_unless((v), 1, 0)
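
/*
 * Illustrative sketch, not part of the original header: the classic use of
 * atomic_inc_not_zero() is taking a reference on an object that may be
 * freed once its count drops to zero.  'struct example_obj' and
 * example_obj_tryget() are hypothetical names.
 */
struct example_obj {
        atomic_t refcount;
        /* ... payload ... */
};

static inline int example_obj_tryget(struct example_obj *obj)
{
        /* Succeeds (returns non-zero) only if a reference is still held. */
        return atomic_inc_not_zero(&obj->refcount);
}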

#define atomic_add(i, v)        (void) atomic_add_return(i, v)
#define atomic_inc(v)           (void) atomic_add_return(1, v)
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)
#define atomic_dec(v)           (void) atomic_sub_return(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

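/*
 * Illustrative sketch, not part of the original header: the release side of
 * the hypothetical refcount above, where only the caller whose decrement
 * reaches zero tears the object down.  example_obj_put() and
 * example_obj_destroy() are hypothetical names.
 */
extern void example_obj_destroy(struct example_obj *obj);

static inline void example_obj_put(struct example_obj *obj)
{
        if (atomic_dec_and_test(&obj->refcount))
                example_obj_destroy(obj);       /* last reference dropped */
}
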
/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#include <asm-generic/atomic.h>
#endif
#endif