2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
12 #include <linux/compiler.h>
13 #include <linux/irqflags.h>
14 #include <linux/types.h>
15 #include <asm/barrier.h>
17 #include <asm/byteorder.h> /* sigh ... */
18 #include <asm/cpu-features.h>
19 #include <asm/sgidefs.h>
22 #if (_MIPS_SZLONG == 32)
24 #define SZLONG_MASK 31UL
29 #elif (_MIPS_SZLONG == 64)
31 #define SZLONG_MASK 63UL
39 * clear_bit() doesn't provide any barrier for the compiler.
41 #define smp_mb__before_clear_bit() smp_mb()
42 #define smp_mb__after_clear_bit() smp_mb()
45 * set_bit - Atomically set a bit in memory
47 * @addr: the address to start counting from
49 * This function is atomic and may not be reordered. See __set_bit()
50 * if you do not require the atomic guarantees.
51 * Note that @nr may be almost arbitrarily large; this function is not
52 * restricted to acting on a single-word quantity.
54 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
/*
 * Atomically set bit @nr in the bitmap at @addr.
 * NOTE(review): this view of the file is fragmentary -- braces and the
 * SC/branch lines of the LL/SC loops are not visible here.
 */
/* Address of the long word that holds bit @nr. */
56 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
/* R10000: needs the errata-workaround form of the LL/SC retry loop. */
59 if (cpu_has_llsc && R10000_LLSC_WAR) {
62 "1: " __LL "%0, %1 # set_bit \n"
67 : "=&r" (temp), "=m" (*m)
68 : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
69 #ifdef CONFIG_CPU_MIPSR2
/* MIPSR2 + compile-time-constant nr: INS writes a 1-wide all-ones field
 * (%4 is ~0) at bit position nr % BITS_PER_LONG, avoiding a mask register. */
70 } else if (__builtin_constant_p(nr)) {
72 "1: " __LL "%0, %1 # set_bit \n"
73 " " __INS "%0, %4, %2, 1 \n"
79 : "=&r" (temp), "=m" (*m)
80 : "ir" (nr & SZLONG_MASK), "m" (*m), "r" (~0));
81 #endif /* CONFIG_CPU_MIPSR2 */
/* Generic LL/SC loop: OR the single-bit mask into the word. */
82 } else if (cpu_has_llsc) {
85 "1: " __LL "%0, %1 # set_bit \n"
93 : "=&r" (temp), "=m" (*m)
94 : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
/* No LL/SC: fall back to masking local interrupts around the update
 * (presumably only sufficient on non-SMP parts -- standard fallback). */
96 volatile unsigned long *a = addr;
100 a += nr >> SZLONG_LOG;
101 mask = 1UL << (nr & SZLONG_MASK);
102 local_irq_save(flags);
104 local_irq_restore(flags);
109 * clear_bit - Clears a bit in memory
111 * @addr: Address to start counting from
113 * clear_bit() is atomic and may not be reordered. However, it does
114 * not contain a memory barrier, so if it is used for locking purposes,
115 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
116 * in order to ensure changes are visible on other processors.
118 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
/*
 * Atomically clear bit @nr in the bitmap at @addr.  Per the comment above,
 * callers using this for locking must add smp_mb__{before,after}_clear_bit().
 * NOTE(review): fragmentary view -- SC/branch lines of the loops are missing.
 */
/* Address of the long word that holds bit @nr. */
120 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
/* R10000 errata-workaround LL/SC loop: AND with the inverted bit mask. */
123 if (cpu_has_llsc && R10000_LLSC_WAR) {
124 __asm__ __volatile__(
126 "1: " __LL "%0, %1 # clear_bit \n"
131 : "=&r" (temp), "=m" (*m)
132 : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
133 #ifdef CONFIG_CPU_MIPSR2
/* MIPSR2 + constant nr: INS from $0 zeroes a 1-wide field at position nr. */
134 } else if (__builtin_constant_p(nr)) {
135 __asm__ __volatile__(
136 "1: " __LL "%0, %1 # clear_bit \n"
137 " " __INS "%0, $0, %2, 1 \n"
143 : "=&r" (temp), "=m" (*m)
144 : "ir" (nr & SZLONG_MASK), "m" (*m));
145 #endif /* CONFIG_CPU_MIPSR2 */
/* Generic LL/SC loop with the inverted mask. */
146 } else if (cpu_has_llsc) {
147 __asm__ __volatile__(
149 "1: " __LL "%0, %1 # clear_bit \n"
157 : "=&r" (temp), "=m" (*m)
158 : "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
/* No LL/SC: irq-disable fallback around the read-modify-write. */
160 volatile unsigned long *a = addr;
164 a += nr >> SZLONG_LOG;
165 mask = 1UL << (nr & SZLONG_MASK);
166 local_irq_save(flags);
168 local_irq_restore(flags);
173 * change_bit - Toggle a bit in memory
175 * @addr: Address to start counting from
177 * change_bit() is atomic and may not be reordered.
178 * Note that @nr may be almost arbitrarily large; this function is not
179 * restricted to acting on a single-word quantity.
181 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
/*
 * Atomically toggle (XOR) bit @nr in the bitmap at @addr.
 * Unlike set_bit/clear_bit there is no MIPSR2 INS variant here: INS can
 * only insert a value, it cannot flip the existing field.
 * NOTE(review): fragmentary view -- SC/branch lines of the loops are missing.
 */
/* R10000 errata-workaround LL/SC loop. */
183 if (cpu_has_llsc && R10000_LLSC_WAR) {
184 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
187 __asm__ __volatile__(
189 "1: " __LL "%0, %1 # change_bit \n"
194 : "=&r" (temp), "=m" (*m)
195 : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
/* Generic LL/SC loop: XOR in the single-bit mask. */
196 } else if (cpu_has_llsc) {
197 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
200 __asm__ __volatile__(
202 "1: " __LL "%0, %1 # change_bit \n"
210 : "=&r" (temp), "=m" (*m)
211 : "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
/* No LL/SC: irq-disable fallback. */
213 volatile unsigned long *a = addr;
217 a += nr >> SZLONG_LOG;
218 mask = 1UL << (nr & SZLONG_MASK);
219 local_irq_save(flags);
221 local_irq_restore(flags);
226 * test_and_set_bit - Set a bit and return its old value
228 * @addr: Address to count from
230 * This operation is atomic and cannot be reordered.
231 * It also implies a memory barrier.
233 static inline int test_and_set_bit(unsigned long nr,
234 volatile unsigned long *addr)
/*
 * Atomically set bit @nr and return its previous value (nonzero if it
 * was already set).  Per the comment above, this implies a full barrier.
 * NOTE(review): fragmentary view -- the SC/branch and result-extraction
 * lines of the LL/SC loops are not visible here.
 */
/* R10000 errata-workaround LL/SC loop; %2 (res) receives the old bit. */
236 if (cpu_has_llsc && R10000_LLSC_WAR) {
237 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
238 unsigned long temp, res;
240 __asm__ __volatile__(
242 "1: " __LL "%0, %1 # test_and_set_bit \n"
248 : "=&r" (temp), "=m" (*m), "=&r" (res)
249 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
/* Generic LL/SC loop. */
253 } else if (cpu_has_llsc) {
254 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
255 unsigned long temp, res;
257 __asm__ __volatile__(
261 "1: " __LL "%0, %1 # test_and_set_bit \n"
271 : "=&r" (temp), "=m" (*m), "=&r" (res)
272 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
/* No LL/SC: sample the old bit, then set it, under local irq disable. */
277 volatile unsigned long *a = addr;
282 a += nr >> SZLONG_LOG;
283 mask = 1UL << (nr & SZLONG_MASK);
284 local_irq_save(flags);
285 retval = (mask & *a) != 0;
287 local_irq_restore(flags);
296 * test_and_clear_bit - Clear a bit and return its old value
298 * @addr: Address to count from
300 * This operation is atomic and cannot be reordered.
301 * It also implies a memory barrier.
303 static inline int test_and_clear_bit(unsigned long nr,
304 volatile unsigned long *addr)
/*
 * Atomically clear bit @nr and return its previous value (nonzero if it
 * was set).  Per the comment above, this implies a full barrier.
 * NOTE(review): fragmentary view -- SC/branch and result lines missing.
 */
/* R10000 errata-workaround LL/SC loop. */
306 if (cpu_has_llsc && R10000_LLSC_WAR) {
307 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
308 unsigned long temp, res;
310 __asm__ __volatile__(
312 "1: " __LL "%0, %1 # test_and_clear_bit \n"
319 : "=&r" (temp), "=m" (*m), "=&r" (res)
320 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
324 #ifdef CONFIG_CPU_MIPSR2
/* MIPSR2 + constant nr: EXT pulls the old 1-wide field into %2 (res),
 * then INS from $0 clears that field -- no mask register needed.
 * NOTE(review): constraint is "ri" here vs "ir" elsewhere; equivalent
 * alternatives, but inconsistent -- worth unifying. */
325 } else if (__builtin_constant_p(nr)) {
326 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
327 unsigned long temp, res;
329 __asm__ __volatile__(
330 "1: " __LL "%0, %1 # test_and_clear_bit \n"
331 " " __EXT "%2, %0, %3, 1 \n"
332 " " __INS "%0, $0, %3, 1 \n"
338 : "=&r" (temp), "=m" (*m), "=&r" (res)
339 : "ri" (nr & SZLONG_MASK), "m" (*m)
/* Generic LL/SC loop. */
344 } else if (cpu_has_llsc) {
345 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
346 unsigned long temp, res;
348 __asm__ __volatile__(
352 "1: " __LL "%0, %1 # test_and_clear_bit \n"
363 : "=&r" (temp), "=m" (*m), "=&r" (res)
364 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
/* No LL/SC: sample then clear the bit under local irq disable. */
369 volatile unsigned long *a = addr;
374 a += nr >> SZLONG_LOG;
375 mask = 1UL << (nr & SZLONG_MASK);
376 local_irq_save(flags);
377 retval = (mask & *a) != 0;
379 local_irq_restore(flags);
388 * test_and_change_bit - Change a bit and return its old value
390 * @addr: Address to count from
392 * This operation is atomic and cannot be reordered.
393 * It also implies a memory barrier.
395 static inline int test_and_change_bit(unsigned long nr,
396 volatile unsigned long *addr)
/*
 * Atomically toggle bit @nr and return its previous value (nonzero if it
 * was set).  Per the comment above, this implies a full barrier.
 * No MIPSR2 INS variant: INS cannot flip a field in place.
 * NOTE(review): fragmentary view -- parts of the LL/SC loops are missing.
 */
/* R10000 errata-workaround LL/SC loop. */
398 if (cpu_has_llsc && R10000_LLSC_WAR) {
399 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
400 unsigned long temp, res;
402 __asm__ __volatile__(
404 "1: " __LL "%0, %1 # test_and_change_bit \n"
410 : "=&r" (temp), "=m" (*m), "=&r" (res)
411 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
/* Generic LL/SC loop; SC stores the toggled word from %2. */
415 } else if (cpu_has_llsc) {
416 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
417 unsigned long temp, res;
419 __asm__ __volatile__(
423 "1: " __LL "%0, %1 # test_and_change_bit \n"
425 " " __SC "\t%2, %1 \n"
433 : "=&r" (temp), "=m" (*m), "=&r" (res)
434 : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
/* No LL/SC: sample then toggle the bit under local irq disable. */
439 volatile unsigned long *a = addr;
440 unsigned long mask, retval;
443 a += nr >> SZLONG_LOG;
444 mask = 1UL << (nr & SZLONG_MASK);
445 local_irq_save(flags);
446 retval = (mask & *a) != 0;
448 local_irq_restore(flags);
456 #include <asm-generic/bitops/non-atomic.h>
459 * Return the bit position (0..SZLONG-1) of the most significant 1 bit in a word
460 * Returns -1 if no 1 bit exists
462 static inline int __ilog2(unsigned long x)
/*
 * Integer log2: bit index of the most significant set bit of @x.
 * Separate paths for 32-bit and 64-bit longs; the BUG_ON rejects any
 * other width.  NOTE(review): the actual computation lines (presumably
 * clz/dclz based, returning -1 for x == 0 per the comment above) are
 * missing from this view of the file.
 */
466 if (sizeof(x) == 4) {
478 BUG_ON(sizeof(x) != 8);
491 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
494 * __ffs - find first bit in word.
495 * @word: The word to search
497 * Returns 0..SZLONG-1
498 * Undefined if no bit exists, so code should check against 0 first.
500 static inline unsigned long __ffs(unsigned long word)
/*
 * Find-first-set: 0-based index of the lowest set bit.
 * word & -word isolates the lowest set bit (two's complement trick),
 * so its __ilog2 is exactly that bit's position.  Undefined for 0,
 * as the comment above states.
 */
502 return __ilog2(word & -word);
506 * fls - find last bit set.
507 * @word: The word to search
509 * This is defined the same way as ffs.
510 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
512 static inline int fls(int word)
/*
 * Find-last-set via the CLZ (count leading zeros) instruction; per the
 * comment above, fls(0) == 0 and fls(0x80000000) == 32 (1-based).
 * NOTE(review): the line converting the clz count to the 1-based result
 * (presumably "return 32 - word") is missing from this view.
 */
514 __asm__ ("clz %0, %1" : "=r" (word) : "r" (word));
519 #if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
520 static inline int fls64(__u64 word)
/*
 * 64-bit fls using DCLZ (64-bit count leading zeros); only compiled on
 * CONFIG_64BIT && CONFIG_CPU_MIPS64 per the guard above, otherwise the
 * asm-generic fls64 is used.  NOTE(review): the conversion/return line
 * (presumably "return 64 - word") is missing from this view.
 */
522 __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));
527 #include <asm-generic/bitops/fls64.h>
531 * ffs - find first bit set.
532 * @word: The word to search
534 * This is defined the same way as
535 * the libc and compiler builtin ffs routines, therefore
536 * differs in spirit from the above ffz (man ffs).
538 static inline int ffs(int word)
/*
 * libc-style ffs: 1-based index of the lowest set bit, 0 if no bit set.
 * word & -word isolates the lowest set bit; fls of that single bit gives
 * its 1-based position, and fls(0) == 0 handles the empty case.
 */
543 return fls(word & -word);
548 #include <asm-generic/bitops/__ffs.h>
549 #include <asm-generic/bitops/ffs.h>
550 #include <asm-generic/bitops/fls.h>
551 #include <asm-generic/bitops/fls64.h>
553 #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
555 #include <asm-generic/bitops/ffz.h>
556 #include <asm-generic/bitops/find.h>
560 #include <asm-generic/bitops/sched.h>
561 #include <asm-generic/bitops/hweight.h>
562 #include <asm-generic/bitops/ext2-non-atomic.h>
563 #include <asm-generic/bitops/ext2-atomic.h>
564 #include <asm-generic/bitops/minix.h>
566 #endif /* __KERNEL__ */
568 #endif /* _ASM_BITOPS_H */