#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/asm.h>

typedef struct {
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l, i)	atomic_long_set(&(l)->a, (i))

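/*
 * Illustrative usage (not part of the original header; the names are
 * hypothetical). A local_t is declared and initialized much like an
 * atomic_long_t:
 *
 *	static local_t hit_count = LOCAL_INIT(0);
 *
 *	long snapshot = local_read(&hit_count);
 *	local_set(&hit_count, 0);
 *
 * Only the CPU that owns the variable should update it; other CPUs may
 * safely do no more than read it.
 */
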
static inline void local_inc(local_t *l)
{
	asm volatile(_ASM_INC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_dec(local_t *l)
{
	asm volatile(_ASM_DEC "%0"
		     : "+m" (l->a.counter));
}

static inline void local_add(long i, local_t *l)
{
	asm volatile(_ASM_ADD "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

static inline void local_sub(long i, local_t *l)
{
	asm volatile(_ASM_SUB "%1,%0"
		     : "+m" (l->a.counter)
		     : "ir" (i));
}

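/*
 * Note: unlike the atomic_* equivalents, the operations above compile to a
 * single inc/dec/add/sub on memory with no LOCK prefix.  They are therefore
 * atomic with respect to interrupts on the owning CPU, but not with respect
 * to concurrent updates from other CPUs.
 */
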
/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline int local_sub_and_test(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_SUB "%2,%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline int local_dec_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_DEC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c;
}

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline int local_inc_and_test(local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_INC "%0; sete %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : : "memory");
	return c;
}

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline int local_add_negative(long i, local_t *l)
{
	unsigned char c;

	asm volatile(_ASM_ADD "%2,%0; sets %1"
		     : "+m" (l->a.counter), "=qm" (c)
		     : "ir" (i) : "memory");
	return c;
}

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l)
{
	long __i;
#ifdef CONFIG_M386
	unsigned long flags;
	if (unlikely(boot_cpu_data.x86 <= 3))
		goto no_xadd;
#endif
	/* Modern 486+ processor */
	__i = i;
	asm volatile(_ASM_XADD "%0, %1;"
		     : "+r" (i), "+m" (l->a.counter)
		     : : "memory");
	return i + __i;

#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
	local_irq_save(flags);
	__i = local_read(l);
	local_set(l, i + __i);
	local_irq_restore(flags);
	return i + __i;
#endif
}

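/*
 * Example (assumed usage, not from the original source): local_add_return()
 * yields the new value, so
 *
 *	long new = local_add_return(16, &l);
 *
 * leaves 'new' equal to the previous contents of 'l' plus 16 -- exactly the
 * 'i + __i' that the xadd fast path computes above.
 */
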
static inline long local_sub_return(long i, local_t *l)
{
	return local_add_return(-i, l);
}

#define local_inc_return(l)	(local_add_return(1, l))
#define local_dec_return(l)	(local_sub_return(1, l))

#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
/* Always has a lock prefix */
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
#define local_add_unless(l, a, u)				\
({								\
	long c, old;						\
	c = local_read((l));					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = local_cmpxchg((l), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

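/*
 * Illustrative use of the cmpxchg-based helpers (hypothetical names, not
 * part of this header): take a reference only while the count is still
 * non-zero, e.g.
 *
 *	if (!local_inc_not_zero(&obj->refs))
 *		return NULL;	/* object already being torn down */
 */
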
/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i, l)	local_add((i), (l))
#define __local_sub(i, l)	local_sub((i), (l))

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address.
 *
 * X86_64: This could be done better if we moved the per cpu data directly
 * after GS.
 */

/* Need to disable preemption for the cpu local counters, otherwise we could
   still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
({					\
	long res__;			\
	preempt_disable();		\
	res__ = (l);			\
	preempt_enable();		\
	res__;				\
})
#define cpu_local_wrap(l)		\
({					\
	preempt_disable();		\
	(l);				\
	preempt_enable();		\
})

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var((l))))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var((l))))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))

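/*
 * Illustrative per-cpu usage (assumed names, not part of this header):
 *
 *	static DEFINE_PER_CPU(local_t, nr_events) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(nr_events);
 *	printk("%ld\n", cpu_local_read(nr_events));
 *
 * Note that these macros take the variable itself, not its address.
 */
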
#define __cpu_local_inc(l)	cpu_local_inc((l))
#define __cpu_local_dec(l)	cpu_local_dec((l))
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))

#endif /* _ASM_X86_LOCAL_H */