#ifndef _ASM_POWERPC_RWSEM_H
#define _ASM_POWERPC_RWSEM_H

#ifdef __KERNEL__

/*
 * include/asm-powerpc/rwsem.h: R/W semaphores for PPC using the stuff
 * in lib/rwsem.c.  Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        /* XXX this should be able to be an atomic_t  -- paulus */
        signed int              count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};
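
/*
 * The count field above packs the whole lock state into one word:
 * the low 16 bits (RWSEM_ACTIVE_MASK) count active lockers, and each
 * task that queues in the slow path biases the count by
 * RWSEM_WAITING_BIAS.  A free semaphore is 0, a single reader +1,
 * and a writer holds RWSEM_ACTIVE_WRITE_BIAS (-0xffff), so any
 * negative value means a writer is active or tasks are waiting.
 */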

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
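
/*
 * Illustrative use (a sketch only; "foo_sem" is hypothetical, and
 * callers normally use the generic wrappers from linux/rwsem.h
 * rather than the __-prefixed primitives below):
 *
 *      static DECLARE_RWSEM(foo_sem);
 *
 *      down_read(&foo_sem);    read-side critical section
 *      up_read(&foo_sem);
 *
 *      down_write(&foo_sem);   write-side critical section
 *      up_write(&foo_sem);
 */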
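
/* Contended-case slow paths, implemented in lib/rwsem.c. */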
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
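/*
 * Atomically increment the active count; a non-positive result means
 * a writer is active or tasks are queued, so take the slow path.
 */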
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
                rwsem_down_read_failed(sem);
}
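
/*
 * Try to lock for reading without sleeping: retry the cmpxchg as long
 * as the count stays non-negative (no active writer, nobody queued).
 */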
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
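/*
 * Add the write bias; the sum equals RWSEM_ACTIVE_WRITE_BIAS only if
 * the semaphore was previously free, otherwise take the slow path.
 */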
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}
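
/*
 * Try to lock for writing without sleeping: one cmpxchg from the
 * unlocked value, which succeeds only if nobody holds or waits.
 */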
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
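/*
 * Drop one reader; if that leaves queued tasks with no active lockers
 * (negative count, active bits clear), wake the next waiter.
 */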
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
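/*
 * Subtract the write bias; a negative result means tasks queued up
 * while we held the lock, so hand over via rwsem_wake().
 */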
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
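/*
 * Adding -RWSEM_WAITING_BIAS turns RWSEM_ACTIVE_WRITE_BIAS into
 * RWSEM_ACTIVE_READ_BIAS, i.e. one active reader.  A negative result
 * means tasks are still queued behind us, so wake the waiting readers.
 */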
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
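/* Atomically adjust the count and return the new value (slow-path helper). */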
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_POWERPC_RWSEM_H */