#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        /* XXX this should be able to be an atomic_t  -- paulus */
        signed int              count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};
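
/*
 * The count word encodes both states at once: active lockers live in
 * the low 16 bits (RWSEM_ACTIVE_MASK), and each queued waiter is
 * accounted with the negative RWSEM_WAITING_BIAS by the slow path in
 * lib/rwsem.c, so the word is negative whenever a writer holds the
 * lock or tasks are waiting.  Some example values:
 *
 *   0x00000000   unlocked (RWSEM_UNLOCKED_VALUE)
 *   0x00000003   three active readers, nobody waiting
 *   0xffff0001   one active writer (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 */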

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
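
/*
 * Illustrative use, via the generic wrappers in <linux/rwsem.h> (the
 * __-prefixed primitives below are only called from there and from
 * lib/rwsem.c); "my_sem" is just an example name:
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);             (shared: many concurrent readers)
 *      ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);            (exclusive: one writer, no readers)
 *      ...
 *      up_write(&my_sem);
 */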

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
                rwsem_down_read_failed(sem);
}
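
/*
 * __down_read() relies on the count encoding: the increment leaves the
 * count positive only when no writer is active or waiting, so a
 * non-positive result means contention and we fall back to the slow
 * path in lib/rwsem.c.
 */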

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}
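
/*
 * __down_read_trylock() retries the cmpxchg only while the count stays
 * non-negative; once a writer is active or waiting (count < 0) it
 * fails immediately instead of spinning.
 */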

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}
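
/*
 * __down_write() takes the fast path only if the add found the count at
 * RWSEM_UNLOCKED_VALUE: any other starting value leaves tmp !=
 * RWSEM_ACTIVE_WRITE_BIAS and sends us to the slow path.
 */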

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}
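
/*
 * The wake test in __up_read() decodes the count: tmp < -1 means the
 * waiting bias is present (tasks are queued), and a clear active mask
 * means we were the last active locker, so a waiter must be woken.
 */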

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0))
                rwsem_wake(sem);
}
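
/*
 * Subtracting RWSEM_ACTIVE_WRITE_BIAS in __up_write() returns the count
 * to zero when nobody is queued; a negative result means the waiting
 * bias of queued tasks remains, so they must be woken.
 */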

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}
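
/*
 * __downgrade_write() adds -RWSEM_WAITING_BIAS (i.e. +0x00010000),
 * which turns the write bias (-0x0000ffff) into a single read bias
 * (+0x00000001) in one atomic step.  A result that is still negative
 * means tasks are queued, and any waiting readers can now be woken.
 */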

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_RWSEM_H */