linux-2.6: include/asm-frv/atomic.h
/* atomic.h: atomic operation emulation for FR-V
 *
 * For an explanation of how atomic ops work in this arch, see:
 *   Documentation/fujitsu/frv/atomic-ops.txt
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/spr-regs.h>
#include <asm/system.h>

#ifdef CONFIG_SMP
#error not SMP safe
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP systems, so we don't have to deal with that.
 */

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

typedef struct {
        int counter;
} atomic_t;

#define ATOMIC_INIT(i)          { (i) }
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

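/*
 * Illustrative usage (an editor's sketch, not part of the original
 * header; nr_users and the functions around it are hypothetical):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	void user_attached(void)
 *	{
 *		atomic_inc(&nr_users);		// defined later in this file
 *	}
 *
 *	int any_users(void)
 *	{
 *		return atomic_read(&nr_users) != 0;
 *	}
 */
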
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
            "   ckeq            icc3,cc7                \n"
            "   ld.p            %M0,%1                  \n"     /* LD.P/ORCR must be atomic */
            "   orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
            "   add%I2          %1,%2,%1                \n"
            "   cst.p           %1,%M0          ,cc3,#1 \n"
            "   corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* clear ICC3.Z if store happens */
            "   beq             icc3,#0,0b              \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}
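
/*
 * In C terms the asm loop above behaves like the sketch below (an
 * editor's explanatory note, not real code; store_if_uninterrupted()
 * is a hypothetical stand-in for the conditional CST.P store, which
 * only completes if the LD.P/ORCR reservation was not broken by an
 * interrupt or exception -- see Documentation/fujitsu/frv/atomic-ops.txt):
 *
 *	do {
 *		val = v->counter;	// ld.p
 *		val += i;		// add
 *	} while (!store_if_uninterrupted(&v->counter, val));	// cst.p
 *	return val;			// note: the new value is returned
 */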

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long val;

        asm("0:                                         \n"
            "   orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
            "   ckeq            icc3,cc7                \n"
            "   ld.p            %M0,%1                  \n"     /* LD.P/ORCR must be atomic */
            "   orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
            "   sub%I2          %1,%2,%1                \n"
            "   cst.p           %1,%M0          ,cc3,#1 \n"
            "   corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* clear ICC3.Z if store happens */
            "   beq             icc3,#0,0b              \n"
            : "+U"(v->counter), "=&r"(val)
            : "NPr"(i)
            : "memory", "cc7", "cc3", "icc3"
            );

        return val;
}

#else

extern int atomic_add_return(int i, atomic_t *v);
extern int atomic_sub_return(int i, atomic_t *v);

#endif

static inline int atomic_add_negative(int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
        atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
        atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
        atomic_sub_return(1, v);
}

#define atomic_dec_return(v)            atomic_sub_return(1, (v))
#define atomic_inc_return(v)            atomic_add_return(1, (v))

#define atomic_sub_and_test(i,v)        (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)          (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)          (atomic_add_return(1, (v)) == 0)
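
/*
 * Typical use of the *_and_test forms (an illustrative sketch; struct
 * foo and foo_put() are hypothetical):
 *
 *	struct foo {
 *		atomic_t refcount;
 *	};
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			kfree(f);	// we dropped the last reference
 *	}
 */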

#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
static inline
unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
                "0:                                             \n"
                "       orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
                "       ckeq            icc3,cc7                \n"
                "       ld.p            %M0,%1                  \n"     /* LD.P/ORCR are atomic */
                "       orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
                "       and%I3          %1,%3,%2                \n"
                "       cst.p           %2,%M0          ,cc3,#1 \n"     /* if store happens... */
                "       corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* ... clear ICC3.Z */
                "       beq             icc3,#0,0b              \n"
                : "+U"(*v), "=&r"(old), "=r"(tmp)
                : "NPr"(~mask)
                : "memory", "cc7", "cc3", "icc3"
                );

        return old;
}

static inline
unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
                "0:                                             \n"
                "       orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
                "       ckeq            icc3,cc7                \n"
                "       ld.p            %M0,%1                  \n"     /* LD.P/ORCR are atomic */
                "       orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
                "       or%I3           %1,%3,%2                \n"
                "       cst.p           %2,%M0          ,cc3,#1 \n"     /* if store happens... */
                "       corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* ... clear ICC3.Z */
                "       beq             icc3,#0,0b              \n"
                : "+U"(*v), "=&r"(old), "=r"(tmp)
                : "NPr"(mask)
                : "memory", "cc7", "cc3", "icc3"
                );

        return old;
}

static inline
unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
{
        unsigned long old, tmp;

        asm volatile(
                "0:                                             \n"
                "       orcc            gr0,gr0,gr0,icc3        \n"     /* set ICC3.Z */
                "       ckeq            icc3,cc7                \n"
                "       ld.p            %M0,%1                  \n"     /* LD.P/ORCR are atomic */
                "       orcr            cc7,cc7,cc3             \n"     /* set CC3 to true */
                "       xor%I3          %1,%3,%2                \n"
                "       cst.p           %2,%M0          ,cc3,#1 \n"     /* if store happens... */
                "       corcc           gr29,gr29,gr0   ,cc3,#1 \n"     /* ... clear ICC3.Z */
                "       beq             icc3,#0,0b              \n"
                : "+U"(*v), "=&r"(old), "=r"(tmp)
                : "NPr"(mask)
                : "memory", "cc7", "cc3", "icc3"
                );

        return old;
}

#else

extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);

#endif

#define atomic_clear_mask(mask, v)      atomic_test_and_ANDNOT_mask((mask), (v))
#define atomic_set_mask(mask, v)        atomic_test_and_OR_mask((mask), (v))
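
/*
 * Illustrative use of the mask operations (an editor's sketch; the
 * flag word and FOO_PENDING bit are hypothetical).  Both macros expand
 * to functions returning the old value of the word, so the caller can
 * also test what the bit was beforehand:
 *
 *	static volatile unsigned long foo_flags;
 *	#define FOO_PENDING	0x00000001UL
 *
 *	atomic_set_mask(FOO_PENDING, &foo_flags);	// set the bit
 *	if (atomic_clear_mask(FOO_PENDING, &foo_flags) & FOO_PENDING)
 *		;	// the bit was set before we cleared it
 */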

/*****************************************************************************/
/*
 * exchange value with memory
 */
#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define xchg(ptr, x)                                                            \
({                                                                              \
        __typeof__(ptr) __xg_ptr = (ptr);                                       \
        __typeof__(*(ptr)) __xg_orig;                                           \
                                                                                \
        switch (sizeof(__xg_orig)) {                                            \
        case 4:                                                                 \
                asm volatile(                                                   \
                        "swap%I0 %M0,%1"                                        \
                        : "+m"(*__xg_ptr), "=r"(__xg_orig)                      \
                        : "1"(x)                                                \
                        : "memory"                                              \
                        );                                                      \
                break;                                                          \
                                                                                \
        default:                                                                \
                __xg_orig = (__typeof__(__xg_orig))0;                           \
                asm volatile("break");                                          \
                break;                                                          \
        }                                                                       \
                                                                                \
        __xg_orig;                                                              \
})

#else

extern uint32_t __xchg_32(uint32_t i, volatile void *v);

#define xchg(ptr, x)                                                                            \
({                                                                                              \
        __typeof__(ptr) __xg_ptr = (ptr);                                                       \
        __typeof__(*(ptr)) __xg_orig;                                                           \
                                                                                                \
        switch (sizeof(__xg_orig)) {                                                            \
        case 4: __xg_orig = (__typeof__(*(ptr))) __xchg_32((uint32_t) x, __xg_ptr);     break;  \
        default:                                                                                \
                __xg_orig = (__typeof__(__xg_orig))0;                                           \
                asm volatile("break");                                                          \
                break;                                                                          \
        }                                                                                       \
        __xg_orig;                                                                              \
})

#endif

#define tas(ptr) (xchg((ptr), 1))
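
/*
 * tas() is the classic test-and-set: it atomically stores 1 and
 * returns the previous value.  A minimal busy-wait lock built on it
 * might look like the sketch below (illustrative only; foo_lock is
 * hypothetical, and on this uniprocessor port a real lock would have
 * to disable preemption or interrupts rather than spin):
 *
 *	static int foo_lock;		// 0 = free, 1 = held
 *
 *	while (tas(&foo_lock))
 *		;			// spin until we see 0 come back
 *	... critical section ...
 *	foo_lock = 0;			// release
 */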

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
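
/*
 * atomic_add_unless()/atomic_inc_not_zero() are the usual building
 * blocks for "take a reference only if the object is still live"
 * lookups.  An illustrative sketch (struct foo and foo_get() are
 * hypothetical):
 *
 *	struct foo *foo_get(struct foo *f)
 *	{
 *		if (!atomic_inc_not_zero(&f->refcount))
 *			return NULL;	// refcount hit zero; object is dying
 *		return f;
 *	}
 */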

#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */