#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
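/*
 * Worked example (illustrative only; assumes L1_CACHE_BYTES == 32 together
 * with the ATOMIC_HASH_SIZE of 4 above): addresses within the same cacheline
 * map to the same lock, neighbouring cachelines map to different locks:
 *
 *	ATOMIC_HASH(0x1000) -> &__atomic_hash[(0x1000 / 32) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x101c) -> &__atomic_hash[(0x101c / 32) & 3] == &__atomic_hash[0]
 *	ATOMIC_HASH(0x1020) -> &__atomic_hash[(0x1020 / 32) & 3] == &__atomic_hash[1]
 */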
extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while (0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while (0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * NMI events can occur at any time, including when interrupts have been
 * disabled by *_irqsave(). So you can get NMI events occurring while a
 * *_bit function is holding a spin lock. If the NMI handler also wants
 * to do bit manipulation (and they do) then you can get a deadlock
 * between the original caller of *_bit() and the NMI handler.
 */
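/*
 * A minimal sketch of that deadlock (hypothetical caller; assumes both bits
 * hash to the same __atomic_hash slot):
 *
 *	task on CPU0:  set_bit(0, &word)
 *	                 _atomic_spin_lock_irqsave(&word, flags)   lock taken
 *	NMI on CPU0:   test_and_set_bit(1, &word)
 *	                 _atomic_spin_lock_irqsave(&word, flags2)   spins on the
 *	                 lock the interrupted task already holds -> deadlock
 */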
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	/* Take the per-cacheline hashed lock with interrupts disabled. */
	_atomic_spin_lock_irqsave(p, flags);
	*p |= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

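/*
 * Worked example (illustrative; assumes 64-bit longs, BITS_PER_LONG == 64):
 * set_bit(100, addr) operates on the word at addr + BIT_WORD(100), i.e.
 * addr[1], using mask BIT_MASK(100) == 1UL << (100 % 64) == 1UL << 36,
 * which is why @nr is not restricted to the first word at @addr.
 */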
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p &= ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

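/*
 * Usage sketch (hypothetical caller, not part of this header): when a bit is
 * used as a lock bit, order the critical-section stores before releasing it:
 *
 *	owner->count++;				(protected update)
 *	smp_mb__before_clear_bit();		(make the update visible first)
 *	clear_bit(MY_LOCK_BIT, &owner->flags);	(release the bit)
 */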
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered. It may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	*p ^= mask;
	_atomic_spin_unlock_irqrestore(p, flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old | mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

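/*
 * Usage sketch (hypothetical names, not part of this header): the returned
 * old value tells the caller whether it performed the 0 -> 1 transition:
 *
 *	if (!test_and_set_bit(WORK_PENDING, &state))
 *		queue_the_work();	(bit was clear: we claimed it)
 */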
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It may be reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old & ~mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old;
	unsigned long flags;

	_atomic_spin_lock_irqsave(p, flags);
	old = *p;
	*p = old ^ mask;
	_atomic_spin_unlock_irqrestore(p, flags);

	return (old & mask) != 0;
}

#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */