#ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__

#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
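
/*
 * Illustrative usage sketch (not part of this header; the variable
 * name is hypothetical): a counter initialized at compile time, then
 * read back and overwritten.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	int n = atomic_read(&example_count);
 *	atomic_set(&example_count, n + 5);
 */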

static __inline__ void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
#endif
}
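
/*
 * A note on the constraints above (editorial, based on the ColdFire
 * ISA): ColdFire cores only accept the addend in a data register
 * ("d"), while classic 68k parts can also encode it as an immediate
 * ("di").  The same split applies to atomic_sub() and
 * atomic_sub_and_test() below.
 */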

static __inline__ void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
#endif
}
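
/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */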
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "d" (i));
#else
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "di" (i));
#endif
	return c != 0;
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
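
/*
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */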
static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
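
/*
 * Note that the two mask helpers below operate on a plain
 * unsigned long, not on an atomic_t.
 */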
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
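
/*
 * Illustrative sketch (the flag word is hypothetical, not part of
 * this header): set, then clear, bit 0 of a mask word.
 *
 *	static unsigned long example_flags;
 *
 *	atomic_set_mask(0x1, &example_flags);
 *	atomic_clear_mask(0x1, &example_flags);
 */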

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
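
/*
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value.  With no SMP to
 * worry about, briefly disabling interrupts is enough to make the
 * read-modify-write sequence atomic on this CPU.
 */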
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
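
/*
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value.
 */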
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
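
/*
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */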
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
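
/*
 * Illustrative refcount sketch (the object and field names are
 * hypothetical): take a reference only while the count is still
 * non-zero, i.e. while the object has not already begun dying.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */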

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

#include <asm-generic/atomic.h>
#endif /* __ARCH_M68KNOMMU_ATOMIC__ */