/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__

/* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
 * of spinlocks to get a bit of performance...
 *
 * See arch/sparc/lib/atomic32.c for implementation.
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

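/* Illustrative only: the out-of-line emulation in arch/sparc/lib/atomic32.c
 * takes a spinlock chosen by hashing the object address.  The sketch below
 * shows the general shape of such an emulation; the hash size, the shift,
 * the lock-array name and the function name are assumptions made for the
 * example, not the real kernel code.
 */
#if 0	/* sketch -- not compiled */
#include <linux/spinlock.h>

#define EXAMPLE_HASH_SIZE	4
#define EXAMPLE_HASH(addr) \
	(&example_locks[((unsigned long)(addr) >> 8) & (EXAMPLE_HASH_SIZE - 1)])

static spinlock_t example_locks[EXAMPLE_HASH_SIZE];	/* initialisation omitted */

static unsigned long example_cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new_)
{
	unsigned long flags;
	u32 prev;

	/* The lock plus disabled interrupts make the read-compare-write
	 * sequence appear atomic to every other user of the same lock. */
	spin_lock_irqsave(EXAMPLE_HASH(ptr), flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new_;
	spin_unlock_irqrestore(EXAMPLE_HASH(ptr), flags);

	return (unsigned long)prev;
}
#endif
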
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr,o,n) ({					\
	__typeof__(*(ptr)) _o_ = (o);				\
	__typeof__(*(ptr)) _n_ = (n);				\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));	\
})

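/* Typical use of cmpxchg(): read the current value, compute an update and
 * retry until no other CPU modified the word in between.  The helper below
 * is hypothetical and exists only to show the retry loop.
 */
#if 0	/* usage example -- not compiled */
/* Add @a to *@p without ever letting the result exceed @max. */
static int example_add_saturate(int *p, int a, int max)
{
	int old, new_;

	do {
		old = *p;
		new_ = old + a;
		if (new_ > max)
			new_ = max;
		/* cmpxchg() returns the value that was in *p; if it is not
		 * 'old', somebody raced with us and we go around again. */
	} while (cmpxchg(p, old, new_) != old);

	return new_;
}
#endif
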
#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

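/* Typical pattern for the *_and_test() helpers: drop a reference and free
 * the object only on the final put.  The structure and kfree() call are
 * purely illustrative.
 */
#if 0	/* usage example -- not compiled */
struct example_obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static void example_put(struct example_obj *obj)
{
	/* Only the caller that takes the count to zero sees 'true', so
	 * exactly one path tears the object down. */
	if (atomic_dec_and_test(&obj->refcnt))
		kfree(obj);
}
#endif
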
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

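/* atomic_add_unless() and atomic_cmpxchg() live out of line in
 * arch/sparc/lib/atomic32.c.  For reference, this is the usual generic
 * cmpxchg-loop shape of add-unless; it is a sketch of the common pattern,
 * not the sparc32 implementation.
 */
#if 0	/* sketch of the generic pattern -- not compiled */
static int example_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u) {
		old = atomic_cmpxchg(v, c, c + a);
		if (old == c)
			break;		/* swap succeeded */
		c = old;		/* lost a race, retry with the new value */
	}
	return c != u;			/* true if the add was performed */
}
#endif
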
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until the low lock byte is clear, then strip it off. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

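/* Conceptual sketch of what the out-of-line ___atomic24_add routine in
 * arch/sparc/lib/atomic.S does, rendered in C for illustration only.
 * example_ldstub() stands in for the ldstub instruction (atomic test-and-set
 * of one byte, returning its previous value) and is hypothetical.
 */
#if 0	/* conceptual sketch -- the real routine is hand-written assembler */
extern unsigned char example_ldstub(volatile unsigned char *addr);

static int example_atomic24_add(int i, atomic24_t *v)
{
	int new_;

	/* Acquire: atomically set the low lock byte (byte 3 of the
	 * big-endian word), spinning while it was already held. */
	while (example_ldstub((volatile unsigned char *)&v->counter + 3))
		/* busy-wait */;

	/* The counter occupies bits 31..8; the arithmetic shift keeps sign. */
	new_ = (v->counter >> 8) + i;

	/* Release: one word store writes the new count and clears the lock. */
	v->counter = new_ << 8;

	return new_;
}
#endif
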
static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Operands are handed to the out-of-line routine in the fixed
	 * global registers named above; see arch/sparc/lib/atomic.S. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Same calling convention as __atomic24_add() above. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */