/* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
 * atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */
	.globl	atomic_add
	.type	atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
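	/* Sketch of the body, assuming the usual sparc64 cas retry
	 * loop with %g1/%g7 as scratch registers: load the counter,
	 * compute the new value, cas it back, and retry if another
	 * CPU got there first.
	 */
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop
	retl
	 nop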
	.size	atomic_add, .-atomic_add

	.globl	atomic_sub
	.type	atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
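	/* Assumed body: the same retry loop as atomic_add, with sub
	 * in place of add.
	 */
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 nop
	retl
	 nop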
	.size	atomic_sub, .-atomic_sub

	/* On SMP we need to use memory barriers to ensure
	 * correct memory operation ordering, nop these out
	 * for uniprocessor.
	 */
#ifdef CONFIG_SMP

#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
#define ATOMIC_POST_BARRIER	\
	ba,pt %xcc, 80b;	\
	membar #StoreLoad | #StoreStore

	/* Shared return stub (assumed target of the 80b branch): the
	 * ba,pt above keeps the membar in a branch delay slot instead
	 * of retl's.
	 */
80:	retl
	 nop
#else
#define ATOMIC_PRE_BARRIER
#define ATOMIC_POST_BARRIER
#endif

	.globl	atomic_add_ret
	.type	atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
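	/* Assumed body: barrier, the same cas loop, then the new value
	 * recomputed into %o0; sra sign-extends the 32-bit result.
	 */
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 add	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop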
	.size	atomic_add_ret, .-atomic_add_ret

	.globl	atomic_sub_ret
	.type	atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
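	/* Assumed body: as atomic_add_ret, subtracting instead.
	 */
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 1b
	 sub	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop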
	.size	atomic_sub_ret, .-atomic_sub_ret

	.globl	atomic64_add
	.type	atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
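	/* Assumed 64-bit variant of the same loop: ldx/casx and the
	 * %xcc condition codes instead of lduw/cas/%icc.
	 */
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 nop
	retl
	 nop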
	.size	atomic64_add, .-atomic64_add

	.globl	atomic64_sub
	.type	atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
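	/* Assumed body: the 64-bit loop again, subtracting.
	 */
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 nop
	retl
	 nop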
	.size	atomic64_sub, .-atomic64_sub

	.globl	atomic64_add_ret
	.type	atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
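	/* Assumed body: as atomic_add_ret, but 64-bit, so a plain mov
	 * (no sign extension) produces the return value.
	 */
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 add	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop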
	.size	atomic64_add_ret, .-atomic64_add_ret

	.globl	atomic64_sub_ret
	.type	atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
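	/* Assumed body: the 64-bit returning variant, subtracting.
	 */
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 1b
	 sub	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop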
	.size	atomic64_sub_ret, .-atomic64_sub_ret