/*
 * asm-ia64/rwsem.h: R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer acquires the lock it adds WRITE_BIAS, which yields
 * 0xffffffff00000001 for the case of an uncontended lock. Readers increment
 * the count by 1 and see a positive value when uncontended, and a negative
 * value if there are writers (and possibly waiting readers), in which case
 * the reader goes to sleep.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS		__IA64_UL_CONST(0x0000000000000001)
#define RWSEM_ACTIVE_MASK		__IA64_UL_CONST(0x00000000ffffffff)
#define RWSEM_WAITING_BIAS		-__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

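/*
 * Worked example of the count encoding described above (illustrative
 * values only, derived from the bias definitions; not part of the
 * original header):
 *
 *	unlocked:			0x0000000000000000
 *	one reader, uncontended:	0 + RWSEM_ACTIVE_READ_BIAS  == 0x0000000000000001
 *	one writer, uncontended:	0 + RWSEM_ACTIVE_WRITE_BIAS == 0xffffffff00000001
 *	reader arrives while the
 *	writer holds the lock:		0xffffffff00000001 + 1      == 0xffffffff00000002
 *
 * The last value is negative when read as a signed 64-bit count, so the
 * arriving reader falls back to the slow path (rwsem_down_read_failed)
 * and sleeps.
 */
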
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	  LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

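/*
 * Usage sketch (illustrative only; "example_sem" is a hypothetical name).
 * Callers normally go through the generic down_read()/up_read() and
 * down_write()/up_write() wrappers from <linux/rwsem.h> rather than the
 * low-level __down_read()/__up_read() style helpers defined below:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	shared: many readers may hold the lock
 *	...read the protected data...
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	exclusive: a single writer at a time
 *	...modify the protected data...
 *	up_write(&example_sem);
 */
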
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

	if (result < 0)
		rwsem_down_read_failed(sem);
}

/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old + RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_acq(&sem->count, old, new) != old);

	if (old != 0)	/* lock was held or had waiters: take the slow path */
		rwsem_down_write_failed(sem);
}

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

	/* fetchadd returns the old count; --result yields the new count */
	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_ACTIVE_WRITE_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
			return 1;
		}
	}
	return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
	long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
			       RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
	long old, new;

	do {
		old = sem->count;
		new = old - RWSEM_WAITING_BIAS;
	} while (cmpxchg_rel(&sem->count, old, new) != old);

	if (old < 0)	/* there are waiters: wake up queued readers */
		rwsem_downgrade_wake(sem);
}

/*
 * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)	atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem)	atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* _ASM_IA64_RWSEM_H */