/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long             count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#if RWSEM_DEBUG
        int                     debug;
#endif
};
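
/*
 * How the count field encodes the lock state (a summary of the
 * standard xadd-style rwsem scheme used by the constants above):
 *
 *  - the low 16 bits (RWSEM_ACTIVE_MASK) count active lockers; each
 *    reader or writer holding the semaphore contributes
 *    RWSEM_ACTIVE_BIAS;
 *  - the upper bits are biased negative (RWSEM_WAITING_BIAS) while a
 *    writer holds the semaphore or tasks are queued on wait_list.
 *
 * Illustrative values on a 32-bit count:
 *
 *  0x00000000  unlocked                (RWSEM_UNLOCKED_VALUE)
 *  0x00000003  three active readers
 *  0xffff0001  one active writer       (RWSEM_ACTIVE_WRITE_BIAS)
 *
 * A positive count therefore means "readers only"; a negative count
 * means a writer and/or waiters are involved and the slow path has to
 * be consulted.
 */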

/*
 * initialisation
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT      /* */
#endif

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) \
          __RWSEM_DEBUG_INIT }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
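
/*
 * The four slow-path routines declared above are not defined in this
 * header; they are expected to come from the kernel's generic rwsem
 * library code shared by architectures using the xadd algorithm.
 */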

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}
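
/*
 * Usage sketch (illustrative only; "my_sem" is an example name): a
 * semaphore is either declared statically or initialised at run time
 * before first use:
 *
 *        static DECLARE_RWSEM(my_sem);
 * or
 *        struct rw_semaphore my_sem;
 *        init_rwsem(&my_sem);
 */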

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}
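
/*
 * __down_read(): the fast path unconditionally adds one reader bias.
 * If the resulting count is positive there is no writer (active or
 * waiting), the read lock is held and the barrier orders the critical
 * section; otherwise rwsem_down_read_failed() queues us and sleeps.
 * Example: 0x00000000 -> 0x00000001 succeeds, while
 * 0xffff0001 -> 0xffff0002 (writer active) takes the slow path.
 */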

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}
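
/*
 * __down_read_trylock(): only attempt the cmpxchg while a snapshot of
 * the count is non-negative (no writer active or waiting); retry on a
 * racing update.  Returns 1 if the read lock was taken, 0 otherwise.
 */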

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}
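
/*
 * __down_write(): the fast path adds the combined writer bias.  The
 * result equals RWSEM_ACTIVE_WRITE_BIAS only if the count was
 * RWSEM_UNLOCKED_VALUE before, i.e. the write lock is now held
 * exclusively; any other value means readers, a writer or waiters
 * were present and rwsem_down_write_failed() must block.
 */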

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}
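
/*
 * __down_write_trylock(): a single cmpxchg from the unlocked value to
 * the writer bias.  It succeeds (returns 1) only when no reader,
 * writer or waiter was present at all.
 */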

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}
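
/*
 * __up_read(): drop one reader bias.  If the result is negative
 * (waiters are queued) and the active count in the low bits has
 * dropped to zero, this was the last active locker, so rwsem_wake()
 * hands the semaphore to the first waiter.
 */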

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}
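
/*
 * __up_write(): release the exclusive hold by subtracting the writer
 * bias; a negative result means tasks are queued behind us, so wake
 * them with rwsem_wake().
 */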

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}
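
/*
 * __downgrade_write(): adding -RWSEM_WAITING_BIAS (i.e. +0x00010000)
 * converts a write hold into a read hold in one step, moving the
 * count from RWSEM_ACTIVE_WRITE_BIAS to RWSEM_ACTIVE_READ_BIAS.  If
 * the result is still negative, other tasks are queued and
 * rwsem_downgrade_wake() lets waiting readers in alongside us.
 */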

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
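
/*
 * rwsem_atomic_add() and rwsem_atomic_update() are not used by the
 * fast paths above; they are provided for the generic slow-path code,
 * which adjusts the count while holding wait_lock.
 */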

#endif  /* _XTENSA_RWSEM_H */