/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 */

#include <asm/backoff.h>

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */
	.globl	atomic_add
	.type	atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
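	/* The loop body was lost in extraction; the sketch below assumes
	 * the usual sparc64 pattern for these routines: load the 32-bit
	 * counter, compute the new value, publish it with cas, and fall
	 * back to the backoff spin at label 2 when another CPU wins.
	 */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 nop
	retl
	 nop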
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_add, .-atomic_add

	.globl	atomic_sub
	.type	atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
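	/* Reconstructed (assumed): same cas loop as atomic_add, but the
	 * new value is old - %o0.
	 */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 nop
	retl
	 nop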
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_sub, .-atomic_sub

	/* On SMP we need to use memory barriers to ensure
	 * correct memory operation ordering, nop these out
	 * for uniprocessor.
	 */
#ifdef CONFIG_SMP
#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
#define ATOMIC_POST_BARRIER	\
	membar	#StoreLoad | #StoreStore
#else
#define ATOMIC_PRE_BARRIER
#define ATOMIC_POST_BARRIER
#endif

	.globl	atomic_add_ret
	.type	atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
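	/* Reconstructed (assumed): the returning variant brackets the cas
	 * loop with the barrier macros and hands back the sign-extended
	 * new 32-bit value in %o0.
	 */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 add	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop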
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_add_ret, .-atomic_add_ret

	.globl	atomic_sub_ret
	.type	atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
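	/* Reconstructed (assumed): as atomic_add_ret, subtracting instead
	 * of adding.
	 */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 sub	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop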
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_sub_ret, .-atomic_sub_ret

	.globl	atomic64_add
	.type	atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
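	/* Reconstructed (assumed): 64-bit flavour of the loop, using
	 * ldx/casx and the %xcc condition codes.
	 */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 nop
	retl
	 nop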
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_add, .-atomic64_add

	.globl	atomic64_sub
	.type	atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
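	/* Reconstructed (assumed): 64-bit casx loop computing old - %o0. */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 nop
	retl
	 nop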
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_sub, .-atomic64_sub

	.globl	atomic64_add_ret
	.type	atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
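	/* Reconstructed (assumed): 64-bit returning variant; the full new
	 * value goes back in %o0, so a plain mov replaces the sra.
	 */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 add	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop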
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_add_ret, .-atomic64_add_ret

	.globl	atomic64_sub_ret
	.type	atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
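	/* Reconstructed (assumed): as atomic64_add_ret, with sub. */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 sub	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop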
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_sub_ret, .-atomic64_sub_ret