#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/types.h>
/*
 * A signed long type for operations which are atomic for a single CPU.
 * Usually used in combination with per-cpu variables.
 *
 * This is the default implementation, which uses atomic_long_t. Which is
 * rather pointless. The whole point behind local_t is that some processors
 * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
 * running on this CPU. local_t allows exploitation of such capabilities.
 */

/* Implement in terms of atomics. */

/* Don't use typedef: don't want them to be mixed with atomic_t's. */
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set((&(l)->a),(i))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)
#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
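
/*
 * Usage sketch (illustrative, not part of this header; "my_counter" is a
 * made-up name):
 *
 *	static local_t my_counter = LOCAL_INIT(0);
 *
 *	local_inc(&my_counter);
 *	local_add(16, &my_counter);
 *	printk("count=%ld\n", local_read(&my_counter));
 */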

#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
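
/*
 * Sketch: the *_and_test forms return true when the result is zero and the
 * *_return forms hand back the new value ("obj" and "pending" are made-up
 * names; remember local_t is only atomic wrt this one CPU):
 *
 *	if (local_dec_and_test(&obj->pending))
 *		complete(&obj->done);
 *
 *	remaining = local_sub_return(n, &obj->pending);
 */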

#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
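
/*
 * Sketch: a classic cmpxchg retry loop; "l" is a local_t and "LIMIT" a
 * made-up bound. local_cmpxchg() returns the value it found, so the loop
 * retries until the swap was made against an unchanged counter:
 *
 *	long old, new;
 *
 *	do {
 *		old = local_read(&l);
 *		new = old < LIMIT ? old + 1 : old;
 *	} while (local_cmpxchg(&l, old, new) != old);
 */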

/* Non-atomic variants, ie. preemption disabled and won't be touched
 * in interrupt, etc. Some archs can optimize this case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))
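
/*
 * Sketch: the __local_* forms are plain read-modify-write sequences, so
 * they are only safe when nothing else on this CPU (including interrupts)
 * can touch the counter, e.g. ("stats" is a made-up local_t):
 *
 *	preempt_disable();
 *	__local_add(delta, &stats);
 *	preempt_enable();
 */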

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations. Note they take
 * a variable (eg. mystruct.foo), not an address.
 */
#define cpu_local_read(l)	local_read(&__get_cpu_var(l))
#define cpu_local_set(l, i)	local_set(&__get_cpu_var(l), (i))
#define cpu_local_inc(l)	local_inc(&__get_cpu_var(l))
#define cpu_local_dec(l)	local_dec(&__get_cpu_var(l))
#define cpu_local_add(i, l)	local_add((i), &__get_cpu_var(l))
#define cpu_local_sub(i, l)	local_sub((i), &__get_cpu_var(l))
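
/*
 * Sketch: a per-cpu counter declared with DEFINE_PER_CPU and updated via
 * the cpu_local_* wrappers; note that the variable itself, not its
 * address, is passed ("hits" is a made-up name):
 *
 *	static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(hits);
 *	cpu_local_add(nr, hits);
 */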

/* Non-atomic increments, ie. preemption disabled and won't be touched
 * in interrupt, etc. Some archs can optimize this case well.
 */
#define __cpu_local_inc(l)	__local_inc(&__get_cpu_var(l))
#define __cpu_local_dec(l)	__local_dec(&__get_cpu_var(l))
#define __cpu_local_add(i, l)	__local_add((i), &__get_cpu_var(l))
#define __cpu_local_sub(i, l)	__local_sub((i), &__get_cpu_var(l))
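
/*
 * Sketch: same rules as the __local_* forms; use only when the per-cpu
 * counter cannot be updated concurrently, e.g. from a preempt-disabled
 * region with no interrupt-context writers ("hits" as above):
 *
 *	__cpu_local_inc(hits);
 */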

#endif /* _ASM_GENERIC_LOCAL_H */