/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

/* lockdep class key for waitqueue head locks */
struct lock_class_key waitqueue_lock_key;

EXPORT_SYMBOL(waitqueue_lock_key);

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

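/*
 * Illustrative sketch (not part of this file): the canonical way callers
 * pair prepare_to_wait()/finish_wait() around a condition check.  The
 * wait queue "my_wq" and the condition "my_condition" are hypothetical
 * names used only for this example.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * prepare_to_wait() sets the task state before the condition is
 * re-tested, so a wake-up that makes the condition true either sees the
 * task on the queue or the task sees the condition before it sleeps;
 * see the barrier note above.
 */
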
void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPU's that we haven't seen yet (and that might
         *    still change the stack area)
         *  - all other users take the lock (ie we can only
         *    have _one_ other CPU that looks at or modifies
         *    our list)
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        /* remove the wait entry only if the task was actually woken */
        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        /*
         * Only wake the waiter if it is waiting on this word and bit,
         * and the bit has actually been cleared.
         */
        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero.  A nonzero return code
 * halts waiting and is passed back to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

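/*
 * Illustrative sketch (not part of this file): a minimal "action"
 * callback for the bit-wait helpers above.  The names my_bit_wait,
 * some_flags and MY_BIT_NR are hypothetical and used only for this
 * example.
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	...
 *	out_of_line_wait_on_bit(&some_flags, MY_BIT_NR,
 *				my_bit_wait, TASK_UNINTERRUPTIBLE);
 *
 * Returning nonzero from the action (for instance when
 * signal_pending(current) is true in an interruptible wait) stops the
 * wait and propagates that value to the caller.
 */
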
int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags)) {
                        if ((ret = (*action)(q->key.flags)))
                                break;
                }
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

        if (waitqueue_active(wq))
                __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

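/*
 * Illustrative sketch (not part of this file): the waker-side pairing
 * described in the comment above.  "some_flags" and MY_BIT_NR are
 * hypothetical names used only for this example.
 *
 *	clear_bit(MY_BIT_NR, &some_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&some_flags, MY_BIT_NR);
 *
 * The barrier between clearing the bit and the waitqueue_active() test
 * inside __wake_up_bit() is what prevents a waiter from testing the
 * bit, missing the clear, and then sleeping without ever being woken.
 */
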
fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        /* hash the word's address and bit number into the zone's wait table */
        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);