#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/config.h>

#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

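/*
 * A minimal usage sketch ("struct foo" and friends are hypothetical,
 * not part of this header): a per-object reference count, where
 * atomic_dec_and_test() guarantees that exactly one caller sees the
 * count hit zero and frees the object.
 *
 *	struct foo {
 *		atomic_t refcnt;
 *	};
 *
 *	void foo_get(struct foo *f)
 *	{
 *		atomic_inc(&f->refcnt);
 *	}
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 */
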
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

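/*
 * Note that atomic_read() and atomic_set() compile to plain loads and
 * stores.  That is sufficient here: a naturally aligned 32-bit access
 * is indivisible on m68k, so only the read-modify-write operations
 * below need special treatment.
 */
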
static inline void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

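/*
 * The *_and_test helpers here and below rely on the m68k Scc family:
 * "seq" sets its byte operand to 0xff if the Z (zero) flag was set by
 * the preceding add/sub, and to 0x00 otherwise; "smi" does the same for
 * the N (negative) flag.  Hence the "c != 0" return.
 */
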
#ifdef CONFIG_RMW_INSNS

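/*
 * With CONFIG_RMW_INSNS, the 68020+ "cas" (compare-and-swap)
 * instruction is available and its indivisible read-modify-write bus
 * cycle works on the machine, so the value-returning variants can be
 * implemented lock-free.  (The option exists because such bus cycles
 * are reported to misbehave with DMA on some systems, e.g. certain
 * Amigas.)
 */
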
static inline int atomic_add_return(int i, atomic_t *v)
{
	int t, tmp;

	/* tmp holds the expected old value, t the new one; retry the
	   cas until no other path has changed *v under us. */
	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	addl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	int t, tmp;

	__asm__ __volatile__(
			"1:	movel %2,%1\n"
			"	subl %3,%1\n"
			"	casl %2,%1,%0\n"
			"	jne 1b"
			: "+m" (*v), "=&d" (t), "=&d" (tmp)
			: "g" (i), "2" (atomic_read(v)));
	return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

#else /* !CONFIG_RMW_INSNS */

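/*
 * Without usable RMW instructions, fall back to briefly disabling local
 * interrupts around a plain read-modify-write sequence.  As the comment
 * at the top of this file notes, there is no SMP on m68k, so excluding
 * local interrupts is enough to make these sequences atomic.
 */
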
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t += i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int t;

	local_irq_save(flags);
	t = atomic_read(v);
	t -= i;
	atomic_set(v, t);
	local_irq_restore(flags);

	return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v) : "g" (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

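/*
 * Unlike the rest of this file, atomic_clear_mask() and
 * atomic_set_mask() operate on a plain unsigned long rather than an
 * atomic_t; each is a single and/or instruction on the word, suitable
 * for things like shared flag words.
 */
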
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

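/*
 * atomic_add_unless() evaluates to non-zero iff it performed the
 * addition, i.e. the counter did not already equal (u).  A typical use
 * of atomic_inc_not_zero(), sketched here with hypothetical names, is
 * taking a reference during a lookup only when the object's count has
 * not yet dropped to zero:
 *
 *	struct foo *foo_lookup(int key)
 *	{
 *		struct foo *f = foo_find(key);
 *
 *		if (f && !atomic_inc_not_zero(&f->refcnt))
 *			f = NULL;
 *		return f;
 *	}
 */
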
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* __ARCH_M68K_ATOMIC__ */