#ifndef __ASM_SH_ATOMIC_H
#define __ASM_SH_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	( (atomic_t) { (i) } )

#define atomic_read(v)		((v)->counter)
#define atomic_set(v,i)		((v)->counter = (i))
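/*
 * Minimal usage sketch (illustrative only, not part of the original
 * header): counting a resource with atomic_t, as suggested by the
 * comment above.  The nr_active_requests name is hypothetical.
 *
 *	static atomic_t nr_active_requests = ATOMIC_INIT(0);
 *
 *	static void request_start(void)
 *	{
 *		atomic_inc(&nr_active_requests);	// defined below
 *	}
 *
 *	static void request_done(void)
 *	{
 *		atomic_dec(&nr_active_requests);
 *	}
 */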
#include <linux/compiler.h>
#include <asm/system.h>
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	/* movli.l/movco.l retry loop: bf 1b restarts if the reservation is lost */
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	/* No LL/SC: make the read-modify-write atomic by masking interrupts */
	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
#endif
}
static inline void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
#endif
}
/*
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
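/*
 * Rough C-level sketch of the movli.l/movco.l sequence used below
 * (illustrative only; the real thing is a single load-locked/
 * store-conditional pair inside one asm block, not separate C
 * statements):
 *
 *	do {
 *		tmp = v->counter;	// movli.l: load and take reservation
 *		tmp += i;		// add (or sub/and/or in the other ops)
 *	} while (!store_conditional(&v->counter, tmp));
 *					// movco.l stores only if the
 *					// reservation is still held;
 *					// bf 1b loops on failure
 *
 * store_conditional() is a hypothetical helper standing in for movco.l,
 * which reports success via the T bit.
 */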
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp += i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}
#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

#ifdef CONFIG_CPU_SH4A
	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	local_irq_restore(flags);
#endif

	return temp;
}
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
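/*
 * Illustrative sketch (not part of the original header): the usual
 * reference-count pattern built on atomic_dec_and_test().  The
 * my_object type and my_object_release() are hypothetical names.
 *
 *	struct my_object {
 *		atomic_t refcnt;
 *	};
 *
 *	static void my_object_put(struct my_object *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			my_object_release(obj);	// last reference dropped
 *	}
 */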
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	local_irq_restore(flags);

	return ret;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = v->counter;
	if (ret != u)
		v->counter += a;
	local_irq_restore(flags);

	return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
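/*
 * Illustrative sketch (not part of the original header): taking a
 * reference only while the count has not already dropped to zero,
 * e.g. from a lockless lookup path.  my_object is the hypothetical
 * type from the sketch above, with an atomic_t refcnt field.
 *
 *	static int my_object_try_get(struct my_object *obj)
 *	{
 *		return atomic_inc_not_zero(&obj->refcnt);  // 0 if already dying
 *	}
 */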
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
#endif
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#ifdef CONFIG_CPU_SH4A
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
#else
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
#endif
}
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */