#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

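/*
 * Illustrative sketch only (not compiled; ll()/sc() are hypothetical
 * stand-ins for the movli.l/movco.l pair): each routine below is the
 * moral equivalent of
 *
 *	do {
 *		tmp = ll(&v->counter);		// movli.l: load and link
 *		tmp += i;			// the actual operation
 *	} while (!sc(&v->counter, tmp));	// movco.l; bf 1b retries
 *						// if the link was broken
 */
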
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */

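/*
 * Hypothetical caller sketch (not part of this file): because the
 * result is already sitting in r0 via the "=&z" output constraint,
 * the *_return() variants cost nothing extra over the void versions.
 * A reference count could therefore use them directly, e.g.
 *
 *	if (atomic_sub_return(1, &obj->refcount) == 0)
 *		destroy_obj(obj);	// obj and destroy_obj() are assumed
 */
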
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)	/* ANDing with ~mask clears the bits */
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)		/* ORing with mask sets the bits */
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */