1 /* $Id: atomic.S,v 1.4 2001/11/18 00:12:56 davem Exp $
2 * atomic.S: These things are too big to do inline.
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */
7 #include <linux/config.h>
12 /* Two versions of the atomic routines, one that
13 * does not return a value and does not perform
14 * memory barriers, and a second which returns
15 * a value and does the barriers.
 */
/* atomic_add: add the increment in %o0 into the atomic_t at %o1.
 * Per the file's own description this variant returns no value and
 * performs no memory barriers.
 * NOTE(review): this listing is truncated -- the instruction body
 * between the entry label and the .size directive is missing
 * (presumably a cas-based retry loop; verify against the full
 * kernel source before assembling).
 */
18 .type atomic_add,#function
19 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
28 .size atomic_add, .-atomic_add
/* atomic_sub: subtract the decrement in %o0 from the atomic_t at %o1.
 * No return value, no memory barriers (see the header comment above).
 * NOTE(review): truncated listing -- the instruction body between the
 * label and the .size directive is missing; verify against the full
 * kernel source.
 */
31 .type atomic_sub,#function
32 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
41 .size atomic_sub, .-atomic_sub
43 /* On SMP we need to use memory barriers to ensure
44 * correct memory operation ordering, nop these out
 * for uniprocessor builds.
 */
/* Barrier macros used by the *_ret variants.  On SMP, real membar
 * instructions enforce acquire/release ordering around the atomic op;
 * on UP they expand to nothing, as the comment above states.
 * NOTE(review): the original #ifdef CONFIG_SMP / #else / #endif lines
 * were lost in this truncated listing; they are restored here so the
 * two definitions of each macro no longer conflict (an unconditional
 * redefinition with a different body is a preprocessor error).
 */
#ifdef CONFIG_SMP
49 #define ATOMIC_PRE_BARRIER membar #StoreLoad | #LoadLoad;
50 #define ATOMIC_POST_BARRIER \
52 membar #StoreLoad | #StoreStore
#else
57 #define ATOMIC_PRE_BARRIER
58 #define ATOMIC_POST_BARRIER
#endif
/* atomic_add_ret: add %o0 into the atomic_t at %o1 and return the new
 * value; this is the barrier-performing, value-returning variant.
 * NOTE(review): truncated listing -- the instruction body (which would
 * use ATOMIC_PRE_BARRIER/ATOMIC_POST_BARRIER) is missing between the
 * label and the .size directive; verify against the full kernel source.
 */
62 .type atomic_add_ret,#function
63 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
75 .size atomic_add_ret, .-atomic_add_ret
/* atomic_sub_ret: subtract %o0 from the atomic_t at %o1 and return the
 * new value; barrier-performing variant.
 * NOTE(review): truncated listing -- the instruction body is missing
 * between the label and the .size directive; verify against the full
 * kernel source.
 */
78 .type atomic_sub_ret,#function
79 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
91 .size atomic_sub_ret, .-atomic_sub_ret
/* atomic64_add: 64-bit counterpart of atomic_add -- add %o0 into the
 * atomic64_t at %o1.  No return value, no barriers.
 * NOTE(review): truncated listing -- the instruction body (presumably a
 * casx-based retry loop for the 64-bit case) is missing; verify against
 * the full kernel source.
 */
94 .type atomic64_add,#function
95 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
104 .size atomic64_add, .-atomic64_add
/* atomic64_sub: 64-bit counterpart of atomic_sub -- subtract %o0 from
 * the atomic64_t at %o1.  No return value, no barriers.
 * NOTE(review): truncated listing -- the instruction body is missing
 * between the label and the .size directive; verify against the full
 * kernel source.
 */
107 .type atomic64_sub,#function
108 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
117 .size atomic64_sub, .-atomic64_sub
/* atomic64_add_ret: add %o0 into the atomic64_t at %o1 and return the
 * new 64-bit value; barrier-performing variant.
 * NOTE(review): truncated listing -- the instruction body is missing
 * between the label and the .size directive; verify against the full
 * kernel source.
 */
119 .globl atomic64_add_ret
120 .type atomic64_add_ret,#function
121 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
133 .size atomic64_add_ret, .-atomic64_add_ret
/* atomic64_sub_ret: subtract %o0 from the atomic64_t at %o1 and return
 * the new 64-bit value; barrier-performing variant.
 * NOTE(review): truncated listing -- the instruction body is missing
 * between the label and the .size directive; verify against the full
 * kernel source.
 */
135 .globl atomic64_sub_ret
136 .type atomic64_sub_ret,#function
137 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
149 .size atomic64_sub_ret, .-atomic64_sub_ret