linux-2.6: include/asm-alpha/rwsem.h
#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * the semaphore definition
 */
struct rw_semaphore {
        long                    count;
#define RWSEM_UNLOCKED_VALUE            0x0000000000000000L
#define RWSEM_ACTIVE_BIAS               0x0000000000000001L
#define RWSEM_ACTIVE_MASK               0x00000000ffffffffL
#define RWSEM_WAITING_BIAS              (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};
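
/*
 * Rough layout of 'count', as implied by the bias values above: the low
 * 32 bits (RWSEM_ACTIVE_MASK) count the active lockers, and the upper
 * bits go negative once a writer holds the lock or tasks are queued on
 * wait_list:
 *
 *      0x0000000000000000      unlocked
 *      0x000000000000000N      N readers active, nobody waiting
 *      0xffffffff00000001      one writer active (waiting + active bias)
 *      other negative values   contended; the slow paths sort it out
 *
 * The fast paths below only look at the sign and the low word to decide
 * whether to call the out-of-line rwsem_*() helpers.
 */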

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
        LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
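
/*
 * Callers normally use the generic wrappers from <linux/rwsem.h>
 * (down_read()/up_read(), down_write()/up_write()) rather than the
 * __*() primitives below.  A minimal sketch of typical usage, with
 * "mylist_sem" as a made-up example name:
 *
 *      static DECLARE_RWSEM(mylist_sem);
 *
 *      down_read(&mylist_sem);         shared: many readers at once
 *      ... walk the list ...
 *      up_read(&mylist_sem);
 *
 *      down_write(&mylist_sem);        exclusive: one writer only
 *      ... modify the list ...
 *      up_write(&mylist_sem);
 */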

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

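/*
 * lock for reading: atomically add the read bias to the count (ll/sc
 * retry loop on SMP, with a trailing mb for acquire ordering).  A
 * negative old count means a writer holds the lock or tasks are queued,
 * so fall back to the out-of-line rwsem_down_read_failed().
 */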
static inline void __down_read(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long old, new, res;

        res = sem->count;
        do {
                new = res + RWSEM_ACTIVE_READ_BIAS;
                if (new <= 0)
                        break;
                old = res;
                res = cmpxchg(&sem->count, old, new);
        } while (res != old);
        return res >= 0 ? 1 : 0;
}

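/*
 * lock for writing: atomically add the write bias.  The fast path only
 * succeeds if the old count was zero (completely unlocked); any other
 * value sends us to the out-of-line rwsem_down_write_failed().
 */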
static inline void __down_write(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount))
                rwsem_down_write_failed(sem);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                           RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
        return 0;
}

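/*
 * unlock after reading: atomically subtract the read bias.  If the old
 * count was negative there are waiters queued; wake them only when we
 * were the last active reader (low 32 bits of the old count == 1).
 */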
static inline void __up_read(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "       mb\n"
        "1:     ldq_l   %0,%1\n"
        "       subq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
                        rwsem_wake(sem);
}

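/*
 * unlock after writing: atomically subtract the write bias.  This path
 * ends up with the *new* count in 'count' (note the extra subq on the
 * SMP side); a non-zero result whose low 32 bits are zero means no
 * lockers remain but waiters are queued, so hand off to rwsem_wake().
 */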
static inline void __up_write(struct rw_semaphore *sem)
{
        long count;
#ifndef CONFIG_SMP
        sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
        count = sem->count;
#else
        long temp;
        __asm__ __volatile__(
        "       mb\n"
        "1:     ldq_l   %0,%1\n"
        "       subq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       subq    %0,%3,%0\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (count), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(count))
                if ((int)count == 0)
                        rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock: adding -RWSEM_WAITING_BIAS turns
 * the write bias we hold into a plain read bias.  A negative old count
 * means tasks are queued behind us; rwsem_downgrade_wake() lets the
 * queued readers in.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long oldcount;
#ifndef CONFIG_SMP
        oldcount = sem->count;
        sem->count -= RWSEM_WAITING_BIAS;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
        if (unlikely(oldcount < 0))
                rwsem_downgrade_wake(sem);
}

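/*
 * The two helpers below are not used by the fast paths above; they are
 * the per-arch hooks that the generic slow-path code (lib/rwsem.c in
 * this tree) uses to adjust and sample the count.
 */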
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
        sem->count += val;
#else
        long temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%2,%0\n"
        "       stq_c   %0,%1\n"
        "       beq     %0,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (temp), "=m" (sem->count)
        :"Ir" (val), "m" (sem->count));
#endif
}

static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef CONFIG_SMP
        sem->count += val;
        return sem->count;
#else
        long ret, temp;
        __asm__ __volatile__(
        "1:     ldq_l   %0,%1\n"
        "       addq    %0,%3,%2\n"
        "       addq    %0,%3,%0\n"
        "       stq_c   %2,%1\n"
        "       beq     %2,2f\n"
        ".subsection 2\n"
        "2:     br      1b\n"
        ".previous"
        :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
        :"Ir" (val), "m" (sem->count));

        return ret;
#endif
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */