int wake_nests = 0;
unsigned long flags;
struct task_struct *this_task = current;
- struct list_head *lsthead = &psw->wake_task_list, *lnk;
+ struct list_head *lsthead = &psw->wake_task_list;
struct wake_task_node *tncur;
struct wake_task_node tnode;
spin_lock_irqsave(&psw->lock, flags);
/* Try to see if the current task is already inside this wakeup call */
- list_for_each(lnk, lsthead) {
- tncur = list_entry(lnk, struct wake_task_node, llink);
+ list_for_each_entry(tncur, lsthead, llink) {
if (tncur->wq == wq ||
(tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
spin_unlock_irqrestore(&psw->lock, flags);
/* Do really wake up now */
- wake_up(wq);
+ wake_up_nested(wq, 1 + wake_nests);
/* Remove the current task from the list */
spin_lock_irqsave(&psw->lock, flags);
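/*
 * Aside: the hunk above folds the open-coded list_for_each() + list_entry()
 * walk into list_for_each_entry().  The sketch below is a minimal userspace
 * re-creation of that pattern (simplified list_head, container_of and the two
 * iteration macros); it is modeled on <linux/list.h> but is not the kernel
 * code itself, and the task_id field is purely illustrative.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }

static inline void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)	container_of(ptr, type, member)

/* Open-coded walk: the caller juggles a raw cursor and calls list_entry(). */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Typed walk: the cursor is already the containing structure. */
#define list_for_each_entry(pos, head, member)				\
	for ((pos) = container_of((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head);					\
	     (pos) = container_of((pos)->member.next, __typeof__(*(pos)), member))

struct wake_task_node {
	struct list_head llink;
	int task_id;
};

int main(void)
{
	struct list_head lsthead = LIST_HEAD_INIT(lsthead);
	struct wake_task_node a = { .task_id = 1 }, b = { .task_id = 2 };
	struct wake_task_node *tncur;
	struct list_head *lnk;

	list_add_tail(&a.llink, &lsthead);
	list_add_tail(&b.llink, &lsthead);

	/* Before: explicit cursor plus a list_entry() call per iteration. */
	list_for_each(lnk, &lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);
		printf("old-style walk: task %d\n", tncur->task_id);
	}

	/* After: the macro hands back the wake_task_node directly. */
	list_for_each_entry(tncur, &lsthead, llink)
		printf("entry walk:     task %d\n", tncur->task_id);

	return 0;
}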
* holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
*/
- while ((rbp = rb_first(&ep->rbr)) != 0) {
+ while ((rbp = rb_first(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
}

* wait list.
*/
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
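/*
 * Aside: wake_up_locked() wakes both interruptible and uninterruptible
 * sleepers, i.e. exactly the TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE mask
 * the old call spelled out, so this substitution (here and in the matching
 * hunks below) is purely cosmetic.  The toy below only models the mode-mask
 * filtering idea in ordinary userspace C; the names mimic kernel constants
 * but nothing in it is kernel code.
 */
#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x01
#define TASK_UNINTERRUPTIBLE	0x02

struct waiter {
	const char  *name;
	unsigned int state;	/* sleep state this waiter parked itself in */
};

/* Wake every waiter whose sleep state is covered by the mode mask. */
static void wake_with_mode(struct waiter *w, int n, unsigned int mode)
{
	for (int i = 0; i < n; i++)
		if (w[i].state & mode)
			printf("waking %s\n", w[i].name);
}

int main(void)
{
	struct waiter wq[] = {
		{ "poller", TASK_INTERRUPTIBLE },
		{ "syncer", TASK_UNINTERRUPTIBLE },
	};

	/* The long-hand form spells the full mask out explicitly ... */
	wake_with_mode(wq, 2, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);

	/*
	 * ... and both waiters are woken; the wake_up_locked() shorthand
	 * covers the same two states.
	 */
	return 0;
}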
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}

/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}

* wait list (delayed after we release the lock).
*/
if (waitqueue_active(&ep->wq))
- __wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
- TASK_INTERRUPTIBLE);
+ wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
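/*
 * Aside: the pwake++ counters above only record, while the spinlock is held,
 * that a poll_wait wakeup is owed; in eventpoll.c that counter is consumed
 * after the lock is released, keeping the nesting-prone safewake call out of
 * the locked region.  A minimal pthread sketch of that "note it under the
 * lock, act on it after unlocking" shape, with invented names
 * (post_event, poll_safewake_stub, have_events):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int have_events;

/* Stand-in for the costly wakeup that must run outside the lock. */
static void poll_safewake_stub(void)
{
	printf("waking pollers outside the lock\n");
}

static void post_event(void)
{
	int pwake = 0;

	pthread_mutex_lock(&lock);
	have_events = 1;	/* state change protected by the lock */
	pwake++;		/* just remember that a wakeup is owed */
	pthread_mutex_unlock(&lock);

	if (pwake)		/* pay the wakeup debt after unlocking */
		poll_safewake_stub();
}

int main(void)
{
	post_event();
	return 0;
}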