#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/config.h>
#include <asm/system.h>
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
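/* Illustrative sketch, not part of the kernel API: "a" and "b" below are
 * hypothetical locals.  Two atomic_ts that share an L1 cacheline hash to
 * the same spinlock; objects on different cachelines may get different
 * locks, since the index is (addr / L1_CACHE_BYTES) % ATOMIC_HASH_SIZE.
 *
 *	atomic_t a, b;
 *	spinlock_t *sa = ATOMIC_HASH(&a);
 *	spinlock_t *sb = ATOMIC_HASH(&b);
 *	// sa == sb whenever &a and &b fall in the same cacheline
 */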
/* Can't use _raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	spinlock_t *s = ATOMIC_HASH(l);		\
	local_irq_save(f);			\
	_raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	spinlock_t *s = ATOMIC_HASH(l);			\
	_raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
**	if (__builtin_constant_p(x) && (x == NULL))
**		if (((unsigned long)p & 0xf) == 0)
**			return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
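/* Usage sketch (illustrative only; "flag" and "old_val" are hypothetical
 * variables, not part of this header):
 *
 *	int flag = 0;
 *	int old_val = xchg(&flag, 1);	// atomically store 1, return prior value
 *
 * sizeof(*ptr) == 4 routes this through __xchg32(); an unsupported size
 * fails at link time via __xchg_called_with_bad_pointer().
 */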
#define __HAVE_ARCH_CMPXCHG	1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
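/* Usage sketch (illustrative only; "lock_word" is a hypothetical variable,
 * not part of this header):
 *
 *	int lock_word = 0;
 *	// Store 1 only if lock_word still holds 0; cmpxchg() returns the
 *	// value actually found there, so equality means we won the race.
 *	if (cmpxchg(&lock_word, 0, 1) == 0)
 *		...
 */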
/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	ret = (v->counter += i);
	_atomic_spin_unlock_irqrestore(v, flags);

	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	_atomic_spin_lock_irqsave(v, flags);
	v->counter = i;
	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}
/* exported interface */

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
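/* Usage sketch for the counting interface (illustrative only; "refcount"
 * is a hypothetical variable, not part of this header):
 *
 *	atomic_t refcount;
 *
 *	atomic_set(&refcount, 1);		// one initial reference
 *	atomic_inc(&refcount);			// take another
 *	if (atomic_dec_and_test(&refcount))	// drop one; true when it reaches 0
 *		...
 *
 * Each of these reduces to __atomic_add_return() (or atomic_set()) under
 * the per-cacheline spinlock hash defined above.
 */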
#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ASM_PARISC_ATOMIC_H_ */