profile: suppress warning about large allocations when profile=1 is specified
[linux-2.6] / kernel / futex.c
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23  *  Copyright (C) IBM Corporation, 2009
24  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
25  *
26  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27  *  enough at me, Linus for the original (flawed) idea, Matthew
28  *  Kirkwood for proof-of-concept implementation.
29  *
30  *  "The futexes are also cursed."
31  *  "But they come in a choice of three flavours!"
32  *
33  *  This program is free software; you can redistribute it and/or modify
34  *  it under the terms of the GNU General Public License as published by
35  *  the Free Software Foundation; either version 2 of the License, or
36  *  (at your option) any later version.
37  *
38  *  This program is distributed in the hope that it will be useful,
39  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
40  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41  *  GNU General Public License for more details.
42  *
43  *  You should have received a copy of the GNU General Public License
44  *  along with this program; if not, write to the Free Software
45  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
46  */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/module.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62
63 #include <asm/futex.h>
64
65 #include "rtmutex_common.h"
66
67 int __read_mostly futex_cmpxchg_enabled;
68
69 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
70
71 /*
72  * Priority Inheritance state:
73  */
74 struct futex_pi_state {
75         /*
76          * list of 'owned' pi_state instances - these have to be
77          * cleaned up in do_exit() if the task exits prematurely:
78          */
79         struct list_head list;
80
81         /*
82          * The PI object:
83          */
84         struct rt_mutex pi_mutex;
85
86         struct task_struct *owner;
87         atomic_t refcount;
88
89         union futex_key key;
90 };
91
92 /*
93  * We use this hashed waitqueue instead of a normal wait_queue_t, so
94  * we can wake only the relevant ones (hashed queues may be shared).
95  *
96  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
97  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
98  * The order of wakup is always to make the first condition true, then
99  * wake up q->waiter, then make the second condition true.
100  */
101 struct futex_q {
102         struct plist_node list;
103         /* Waiter reference */
104         struct task_struct *task;
105
106         /* Which hash list lock to use: */
107         spinlock_t *lock_ptr;
108
109         /* Key which the futex is hashed on: */
110         union futex_key key;
111
112         /* Optional priority inheritance state: */
113         struct futex_pi_state *pi_state;
114
115         /* rt_waiter storage for requeue_pi: */
116         struct rt_mutex_waiter *rt_waiter;
117
118         /* Bitset for the optional bitmasked wakeup */
119         u32 bitset;
120 };
121
122 /*
123  * Hash buckets are shared by all the futex_keys that hash to the same
124  * location.  Each key may have multiple futex_q structures, one for each task
125  * waiting on a futex.
126  */
127 struct futex_hash_bucket {
128         spinlock_t lock;
129         struct plist_head chain;
130 };
131
132 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
133
134 /*
135  * We hash on the keys returned from get_futex_key (see below).
136  */
137 static struct futex_hash_bucket *hash_futex(union futex_key *key)
138 {
139         u32 hash = jhash2((u32*)&key->both.word,
140                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
141                           key->both.offset);
142         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
143 }
144
145 /*
146  * Return 1 if two futex_keys are equal, 0 otherwise.
147  */
148 static inline int match_futex(union futex_key *key1, union futex_key *key2)
149 {
150         return (key1->both.word == key2->both.word
151                 && key1->both.ptr == key2->both.ptr
152                 && key1->both.offset == key2->both.offset);
153 }
154
155 /*
156  * Take a reference to the resource addressed by a key.
157  * Can be called while holding spinlocks.
158  *
159  */
160 static void get_futex_key_refs(union futex_key *key)
161 {
162         if (!key->both.ptr)
163                 return;
164
165         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
166         case FUT_OFF_INODE:
167                 atomic_inc(&key->shared.inode->i_count);
168                 break;
169         case FUT_OFF_MMSHARED:
170                 atomic_inc(&key->private.mm->mm_count);
171                 break;
172         }
173 }
174
175 /*
176  * Drop a reference to the resource addressed by a key.
177  * The hash bucket spinlock must not be held.
178  */
179 static void drop_futex_key_refs(union futex_key *key)
180 {
181         if (!key->both.ptr) {
182                 /* If we're here then we tried to put a key we failed to get */
183                 WARN_ON_ONCE(1);
184                 return;
185         }
186
187         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
188         case FUT_OFF_INODE:
189                 iput(key->shared.inode);
190                 break;
191         case FUT_OFF_MMSHARED:
192                 mmdrop(key->private.mm);
193                 break;
194         }
195 }
196
197 /**
198  * get_futex_key - Get parameters which are the keys for a futex.
199  * @uaddr: virtual address of the futex
200  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
201  * @key: address where result is stored.
202  * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
203  *
204  * Returns a negative error code or 0
205  * The key words are stored in *key on success.
206  *
207  * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
208  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
209  * We can usually work out the index without swapping in the page.
210  *
211  * lock_page() might sleep, the caller should not hold a spinlock.
212  */
213 static int
214 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
215 {
216         unsigned long address = (unsigned long)uaddr;
217         struct mm_struct *mm = current->mm;
218         struct page *page;
219         int err;
220
221         /*
222          * The futex address must be "naturally" aligned.
223          */
224         key->both.offset = address % PAGE_SIZE;
225         if (unlikely((address % sizeof(u32)) != 0))
226                 return -EINVAL;
227         address -= key->both.offset;
228
229         /*
230          * PROCESS_PRIVATE futexes are fast.
231          * As the mm cannot disappear under us and the 'key' only needs
232          * virtual address, we dont even have to find the underlying vma.
233          * Note : We do have to check 'uaddr' is a valid user address,
234          *        but access_ok() should be faster than find_vma()
235          */
236         if (!fshared) {
237                 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
238                         return -EFAULT;
239                 key->private.mm = mm;
240                 key->private.address = address;
241                 get_futex_key_refs(key);
242                 return 0;
243         }
244
245 again:
246         err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
247         if (err < 0)
248                 return err;
249
250         page = compound_head(page);
251         lock_page(page);
252         if (!page->mapping) {
253                 unlock_page(page);
254                 put_page(page);
255                 goto again;
256         }
257
258         /*
259          * Private mappings are handled in a simple way.
260          *
261          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
262          * it's a read-only handle, it's expected that futexes attach to
263          * the object not the particular process.
264          */
265         if (PageAnon(page)) {
266                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
267                 key->private.mm = mm;
268                 key->private.address = address;
269         } else {
270                 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
271                 key->shared.inode = page->mapping->host;
272                 key->shared.pgoff = page->index;
273         }
274
275         get_futex_key_refs(key);
276
277         unlock_page(page);
278         put_page(page);
279         return 0;
280 }
281
282 static inline
283 void put_futex_key(int fshared, union futex_key *key)
284 {
285         drop_futex_key_refs(key);
286 }
287
288 /*
289  * fault_in_user_writeable - fault in user address and verify RW access
290  * @uaddr:      pointer to faulting user space address
291  *
292  * Slow path to fixup the fault we just took in the atomic write
293  * access to @uaddr.
294  *
295  * We have no generic implementation of a non destructive write to the
296  * user address. We know that we faulted in the atomic pagefault
297  * disabled section so we can as well avoid the #PF overhead by
298  * calling get_user_pages() right away.
299  */
300 static int fault_in_user_writeable(u32 __user *uaddr)
301 {
302         int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
303                                  1, 1, 0, NULL, NULL);
304         return ret < 0 ? ret : 0;
305 }
306
307 /**
308  * futex_top_waiter() - Return the highest priority waiter on a futex
309  * @hb:     the hash bucket the futex_q's reside in
310  * @key:    the futex key (to distinguish it from other futex futex_q's)
311  *
312  * Must be called with the hb lock held.
313  */
314 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
315                                         union futex_key *key)
316 {
317         struct futex_q *this;
318
319         plist_for_each_entry(this, &hb->chain, list) {
320                 if (match_futex(&this->key, key))
321                         return this;
322         }
323         return NULL;
324 }
325
326 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
327 {
328         u32 curval;
329
330         pagefault_disable();
331         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
332         pagefault_enable();
333
334         return curval;
335 }
336
337 static int get_futex_value_locked(u32 *dest, u32 __user *from)
338 {
339         int ret;
340
341         pagefault_disable();
342         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
343         pagefault_enable();
344
345         return ret ? -EFAULT : 0;
346 }
347
348
349 /*
350  * PI code:
351  */
352 static int refill_pi_state_cache(void)
353 {
354         struct futex_pi_state *pi_state;
355
356         if (likely(current->pi_state_cache))
357                 return 0;
358
359         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
360
361         if (!pi_state)
362                 return -ENOMEM;
363
364         INIT_LIST_HEAD(&pi_state->list);
365         /* pi_mutex gets initialized later */
366         pi_state->owner = NULL;
367         atomic_set(&pi_state->refcount, 1);
368         pi_state->key = FUTEX_KEY_INIT;
369
370         current->pi_state_cache = pi_state;
371
372         return 0;
373 }
374
375 static struct futex_pi_state * alloc_pi_state(void)
376 {
377         struct futex_pi_state *pi_state = current->pi_state_cache;
378
379         WARN_ON(!pi_state);
380         current->pi_state_cache = NULL;
381
382         return pi_state;
383 }
384
385 static void free_pi_state(struct futex_pi_state *pi_state)
386 {
387         if (!atomic_dec_and_test(&pi_state->refcount))
388                 return;
389
390         /*
391          * If pi_state->owner is NULL, the owner is most probably dying
392          * and has cleaned up the pi_state already
393          */
394         if (pi_state->owner) {
395                 spin_lock_irq(&pi_state->owner->pi_lock);
396                 list_del_init(&pi_state->list);
397                 spin_unlock_irq(&pi_state->owner->pi_lock);
398
399                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
400         }
401
402         if (current->pi_state_cache)
403                 kfree(pi_state);
404         else {
405                 /*
406                  * pi_state->list is already empty.
407                  * clear pi_state->owner.
408                  * refcount is at 0 - put it back to 1.
409                  */
410                 pi_state->owner = NULL;
411                 atomic_set(&pi_state->refcount, 1);
412                 current->pi_state_cache = pi_state;
413         }
414 }
415
416 /*
417  * Look up the task based on what TID userspace gave us.
418  * We dont trust it.
419  */
420 static struct task_struct * futex_find_get_task(pid_t pid)
421 {
422         struct task_struct *p;
423         const struct cred *cred = current_cred(), *pcred;
424
425         rcu_read_lock();
426         p = find_task_by_vpid(pid);
427         if (!p) {
428                 p = ERR_PTR(-ESRCH);
429         } else {
430                 pcred = __task_cred(p);
431                 if (cred->euid != pcred->euid &&
432                     cred->euid != pcred->uid)
433                         p = ERR_PTR(-ESRCH);
434                 else
435                         get_task_struct(p);
436         }
437
438         rcu_read_unlock();
439
440         return p;
441 }
442
443 /*
444  * This task is holding PI mutexes at exit time => bad.
445  * Kernel cleans up PI-state, but userspace is likely hosed.
446  * (Robust-futex cleanup is separate and might save the day for userspace.)
447  */
448 void exit_pi_state_list(struct task_struct *curr)
449 {
450         struct list_head *next, *head = &curr->pi_state_list;
451         struct futex_pi_state *pi_state;
452         struct futex_hash_bucket *hb;
453         union futex_key key = FUTEX_KEY_INIT;
454
455         if (!futex_cmpxchg_enabled)
456                 return;
457         /*
458          * We are a ZOMBIE and nobody can enqueue itself on
459          * pi_state_list anymore, but we have to be careful
460          * versus waiters unqueueing themselves:
461          */
462         spin_lock_irq(&curr->pi_lock);
463         while (!list_empty(head)) {
464
465                 next = head->next;
466                 pi_state = list_entry(next, struct futex_pi_state, list);
467                 key = pi_state->key;
468                 hb = hash_futex(&key);
469                 spin_unlock_irq(&curr->pi_lock);
470
471                 spin_lock(&hb->lock);
472
473                 spin_lock_irq(&curr->pi_lock);
474                 /*
475                  * We dropped the pi-lock, so re-check whether this
476                  * task still owns the PI-state:
477                  */
478                 if (head->next != next) {
479                         spin_unlock(&hb->lock);
480                         continue;
481                 }
482
483                 WARN_ON(pi_state->owner != curr);
484                 WARN_ON(list_empty(&pi_state->list));
485                 list_del_init(&pi_state->list);
486                 pi_state->owner = NULL;
487                 spin_unlock_irq(&curr->pi_lock);
488
489                 rt_mutex_unlock(&pi_state->pi_mutex);
490
491                 spin_unlock(&hb->lock);
492
493                 spin_lock_irq(&curr->pi_lock);
494         }
495         spin_unlock_irq(&curr->pi_lock);
496 }
497
498 static int
499 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
500                 union futex_key *key, struct futex_pi_state **ps)
501 {
502         struct futex_pi_state *pi_state = NULL;
503         struct futex_q *this, *next;
504         struct plist_head *head;
505         struct task_struct *p;
506         pid_t pid = uval & FUTEX_TID_MASK;
507
508         head = &hb->chain;
509
510         plist_for_each_entry_safe(this, next, head, list) {
511                 if (match_futex(&this->key, key)) {
512                         /*
513                          * Another waiter already exists - bump up
514                          * the refcount and return its pi_state:
515                          */
516                         pi_state = this->pi_state;
517                         /*
518                          * Userspace might have messed up non PI and PI futexes
519                          */
520                         if (unlikely(!pi_state))
521                                 return -EINVAL;
522
523                         WARN_ON(!atomic_read(&pi_state->refcount));
524                         WARN_ON(pid && pi_state->owner &&
525                                 pi_state->owner->pid != pid);
526
527                         atomic_inc(&pi_state->refcount);
528                         *ps = pi_state;
529
530                         return 0;
531                 }
532         }
533
534         /*
535          * We are the first waiter - try to look up the real owner and attach
536          * the new pi_state to it, but bail out when TID = 0
537          */
538         if (!pid)
539                 return -ESRCH;
540         p = futex_find_get_task(pid);
541         if (IS_ERR(p))
542                 return PTR_ERR(p);
543
544         /*
545          * We need to look at the task state flags to figure out,
546          * whether the task is exiting. To protect against the do_exit
547          * change of the task flags, we do this protected by
548          * p->pi_lock:
549          */
550         spin_lock_irq(&p->pi_lock);
551         if (unlikely(p->flags & PF_EXITING)) {
552                 /*
553                  * The task is on the way out. When PF_EXITPIDONE is
554                  * set, we know that the task has finished the
555                  * cleanup:
556                  */
557                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
558
559                 spin_unlock_irq(&p->pi_lock);
560                 put_task_struct(p);
561                 return ret;
562         }
563
564         pi_state = alloc_pi_state();
565
566         /*
567          * Initialize the pi_mutex in locked state and make 'p'
568          * the owner of it:
569          */
570         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
571
572         /* Store the key for possible exit cleanups: */
573         pi_state->key = *key;
574
575         WARN_ON(!list_empty(&pi_state->list));
576         list_add(&pi_state->list, &p->pi_state_list);
577         pi_state->owner = p;
578         spin_unlock_irq(&p->pi_lock);
579
580         put_task_struct(p);
581
582         *ps = pi_state;
583
584         return 0;
585 }
586
587 /**
588  * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
589  * @uaddr:              the pi futex user address
590  * @hb:                 the pi futex hash bucket
591  * @key:                the futex key associated with uaddr and hb
592  * @ps:                 the pi_state pointer where we store the result of the
593  *                      lookup
594  * @task:               the task to perform the atomic lock work for.  This will
595  *                      be "current" except in the case of requeue pi.
596  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
597  *
598  * Returns:
599  *  0 - ready to wait
600  *  1 - acquired the lock
601  * <0 - error
602  *
603  * The hb->lock and futex_key refs shall be held by the caller.
604  */
605 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
606                                 union futex_key *key,
607                                 struct futex_pi_state **ps,
608                                 struct task_struct *task, int set_waiters)
609 {
610         int lock_taken, ret, ownerdied = 0;
611         u32 uval, newval, curval;
612
613 retry:
614         ret = lock_taken = 0;
615
616         /*
617          * To avoid races, we attempt to take the lock here again
618          * (by doing a 0 -> TID atomic cmpxchg), while holding all
619          * the locks. It will most likely not succeed.
620          */
621         newval = task_pid_vnr(task);
622         if (set_waiters)
623                 newval |= FUTEX_WAITERS;
624
625         curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
626
627         if (unlikely(curval == -EFAULT))
628                 return -EFAULT;
629
630         /*
631          * Detect deadlocks.
632          */
633         if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
634                 return -EDEADLK;
635
636         /*
637          * Surprise - we got the lock. Just return to userspace:
638          */
639         if (unlikely(!curval))
640                 return 1;
641
642         uval = curval;
643
644         /*
645          * Set the FUTEX_WAITERS flag, so the owner will know it has someone
646          * to wake at the next unlock.
647          */
648         newval = curval | FUTEX_WAITERS;
649
650         /*
651          * There are two cases, where a futex might have no owner (the
652          * owner TID is 0): OWNER_DIED. We take over the futex in this
653          * case. We also do an unconditional take over, when the owner
654          * of the futex died.
655          *
656          * This is safe as we are protected by the hash bucket lock !
657          */
658         if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
659                 /* Keep the OWNER_DIED bit */
660                 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
661                 ownerdied = 0;
662                 lock_taken = 1;
663         }
664
665         curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
666
667         if (unlikely(curval == -EFAULT))
668                 return -EFAULT;
669         if (unlikely(curval != uval))
670                 goto retry;
671
672         /*
673          * We took the lock due to owner died take over.
674          */
675         if (unlikely(lock_taken))
676                 return 1;
677
678         /*
679          * We dont have the lock. Look up the PI state (or create it if
680          * we are the first waiter):
681          */
682         ret = lookup_pi_state(uval, hb, key, ps);
683
684         if (unlikely(ret)) {
685                 switch (ret) {
686                 case -ESRCH:
687                         /*
688                          * No owner found for this futex. Check if the
689                          * OWNER_DIED bit is set to figure out whether
690                          * this is a robust futex or not.
691                          */
692                         if (get_futex_value_locked(&curval, uaddr))
693                                 return -EFAULT;
694
695                         /*
696                          * We simply start over in case of a robust
697                          * futex. The code above will take the futex
698                          * and return happy.
699                          */
700                         if (curval & FUTEX_OWNER_DIED) {
701                                 ownerdied = 1;
702                                 goto retry;
703                         }
704                 default:
705                         break;
706                 }
707         }
708
709         return ret;
710 }
711
712 /*
713  * The hash bucket lock must be held when this is called.
714  * Afterwards, the futex_q must not be accessed.
715  */
716 static void wake_futex(struct futex_q *q)
717 {
718         struct task_struct *p = q->task;
719
720         /*
721          * We set q->lock_ptr = NULL _before_ we wake up the task. If
722          * a non futex wake up happens on another CPU then the task
723          * might exit and p would dereference a non existing task
724          * struct. Prevent this by holding a reference on p across the
725          * wake up.
726          */
727         get_task_struct(p);
728
729         plist_del(&q->list, &q->list.plist);
730         /*
731          * The waiting task can free the futex_q as soon as
732          * q->lock_ptr = NULL is written, without taking any locks. A
733          * memory barrier is required here to prevent the following
734          * store to lock_ptr from getting ahead of the plist_del.
735          */
736         smp_wmb();
737         q->lock_ptr = NULL;
738
739         wake_up_state(p, TASK_NORMAL);
740         put_task_struct(p);
741 }
742
743 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
744 {
745         struct task_struct *new_owner;
746         struct futex_pi_state *pi_state = this->pi_state;
747         u32 curval, newval;
748
749         if (!pi_state)
750                 return -EINVAL;
751
752         spin_lock(&pi_state->pi_mutex.wait_lock);
753         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
754
755         /*
756          * This happens when we have stolen the lock and the original
757          * pending owner did not enqueue itself back on the rt_mutex.
758          * Thats not a tragedy. We know that way, that a lock waiter
759          * is on the fly. We make the futex_q waiter the pending owner.
760          */
761         if (!new_owner)
762                 new_owner = this->task;
763
764         /*
765          * We pass it to the next owner. (The WAITERS bit is always
766          * kept enabled while there is PI state around. We must also
767          * preserve the owner died bit.)
768          */
769         if (!(uval & FUTEX_OWNER_DIED)) {
770                 int ret = 0;
771
772                 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
773
774                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
775
776                 if (curval == -EFAULT)
777                         ret = -EFAULT;
778                 else if (curval != uval)
779                         ret = -EINVAL;
780                 if (ret) {
781                         spin_unlock(&pi_state->pi_mutex.wait_lock);
782                         return ret;
783                 }
784         }
785
786         spin_lock_irq(&pi_state->owner->pi_lock);
787         WARN_ON(list_empty(&pi_state->list));
788         list_del_init(&pi_state->list);
789         spin_unlock_irq(&pi_state->owner->pi_lock);
790
791         spin_lock_irq(&new_owner->pi_lock);
792         WARN_ON(!list_empty(&pi_state->list));
793         list_add(&pi_state->list, &new_owner->pi_state_list);
794         pi_state->owner = new_owner;
795         spin_unlock_irq(&new_owner->pi_lock);
796
797         spin_unlock(&pi_state->pi_mutex.wait_lock);
798         rt_mutex_unlock(&pi_state->pi_mutex);
799
800         return 0;
801 }
802
803 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
804 {
805         u32 oldval;
806
807         /*
808          * There is no waiter, so we unlock the futex. The owner died
809          * bit has not to be preserved here. We are the owner:
810          */
811         oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
812
813         if (oldval == -EFAULT)
814                 return oldval;
815         if (oldval != uval)
816                 return -EAGAIN;
817
818         return 0;
819 }
820
821 /*
822  * Express the locking dependencies for lockdep:
823  */
824 static inline void
825 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
826 {
827         if (hb1 <= hb2) {
828                 spin_lock(&hb1->lock);
829                 if (hb1 < hb2)
830                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
831         } else { /* hb1 > hb2 */
832                 spin_lock(&hb2->lock);
833                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
834         }
835 }
836
837 static inline void
838 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
839 {
840         spin_unlock(&hb1->lock);
841         if (hb1 != hb2)
842                 spin_unlock(&hb2->lock);
843 }
844
845 /*
846  * Wake up waiters matching bitset queued on this futex (uaddr).
847  */
848 static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
849 {
850         struct futex_hash_bucket *hb;
851         struct futex_q *this, *next;
852         struct plist_head *head;
853         union futex_key key = FUTEX_KEY_INIT;
854         int ret;
855
856         if (!bitset)
857                 return -EINVAL;
858
859         ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
860         if (unlikely(ret != 0))
861                 goto out;
862
863         hb = hash_futex(&key);
864         spin_lock(&hb->lock);
865         head = &hb->chain;
866
867         plist_for_each_entry_safe(this, next, head, list) {
868                 if (match_futex (&this->key, &key)) {
869                         if (this->pi_state || this->rt_waiter) {
870                                 ret = -EINVAL;
871                                 break;
872                         }
873
874                         /* Check if one of the bits is set in both bitsets */
875                         if (!(this->bitset & bitset))
876                                 continue;
877
878                         wake_futex(this);
879                         if (++ret >= nr_wake)
880                                 break;
881                 }
882         }
883
884         spin_unlock(&hb->lock);
885         put_futex_key(fshared, &key);
886 out:
887         return ret;
888 }
889
890 /*
891  * Wake up all waiters hashed on the physical page that is mapped
892  * to this virtual address:
893  */
894 static int
895 futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
896               int nr_wake, int nr_wake2, int op)
897 {
898         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
899         struct futex_hash_bucket *hb1, *hb2;
900         struct plist_head *head;
901         struct futex_q *this, *next;
902         int ret, op_ret;
903
904 retry:
905         ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
906         if (unlikely(ret != 0))
907                 goto out;
908         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
909         if (unlikely(ret != 0))
910                 goto out_put_key1;
911
912         hb1 = hash_futex(&key1);
913         hb2 = hash_futex(&key2);
914
915         double_lock_hb(hb1, hb2);
916 retry_private:
917         op_ret = futex_atomic_op_inuser(op, uaddr2);
918         if (unlikely(op_ret < 0)) {
919
920                 double_unlock_hb(hb1, hb2);
921
922 #ifndef CONFIG_MMU
923                 /*
924                  * we don't get EFAULT from MMU faults if we don't have an MMU,
925                  * but we might get them from range checking
926                  */
927                 ret = op_ret;
928                 goto out_put_keys;
929 #endif
930
931                 if (unlikely(op_ret != -EFAULT)) {
932                         ret = op_ret;
933                         goto out_put_keys;
934                 }
935
936                 ret = fault_in_user_writeable(uaddr2);
937                 if (ret)
938                         goto out_put_keys;
939
940                 if (!fshared)
941                         goto retry_private;
942
943                 put_futex_key(fshared, &key2);
944                 put_futex_key(fshared, &key1);
945                 goto retry;
946         }
947
948         head = &hb1->chain;
949
950         plist_for_each_entry_safe(this, next, head, list) {
951                 if (match_futex (&this->key, &key1)) {
952                         wake_futex(this);
953                         if (++ret >= nr_wake)
954                                 break;
955                 }
956         }
957
958         if (op_ret > 0) {
959                 head = &hb2->chain;
960
961                 op_ret = 0;
962                 plist_for_each_entry_safe(this, next, head, list) {
963                         if (match_futex (&this->key, &key2)) {
964                                 wake_futex(this);
965                                 if (++op_ret >= nr_wake2)
966                                         break;
967                         }
968                 }
969                 ret += op_ret;
970         }
971
972         double_unlock_hb(hb1, hb2);
973 out_put_keys:
974         put_futex_key(fshared, &key2);
975 out_put_key1:
976         put_futex_key(fshared, &key1);
977 out:
978         return ret;
979 }
980
981 /**
982  * requeue_futex() - Requeue a futex_q from one hb to another
983  * @q:          the futex_q to requeue
984  * @hb1:        the source hash_bucket
985  * @hb2:        the target hash_bucket
986  * @key2:       the new key for the requeued futex_q
987  */
988 static inline
989 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
990                    struct futex_hash_bucket *hb2, union futex_key *key2)
991 {
992
993         /*
994          * If key1 and key2 hash to the same bucket, no need to
995          * requeue.
996          */
997         if (likely(&hb1->chain != &hb2->chain)) {
998                 plist_del(&q->list, &hb1->chain);
999                 plist_add(&q->list, &hb2->chain);
1000                 q->lock_ptr = &hb2->lock;
1001 #ifdef CONFIG_DEBUG_PI_LIST
1002                 q->list.plist.lock = &hb2->lock;
1003 #endif
1004         }
1005         get_futex_key_refs(key2);
1006         q->key = *key2;
1007 }
1008
1009 /**
1010  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1011  * q:   the futex_q
1012  * key: the key of the requeue target futex
1013  *
1014  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1015  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1016  * to the requeue target futex so the waiter can detect the wakeup on the right
1017  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1018  * atomic lock acquisition.  Must be called with the q->lock_ptr held.
1019  */
1020 static inline
1021 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
1022 {
1023         drop_futex_key_refs(&q->key);
1024         get_futex_key_refs(key);
1025         q->key = *key;
1026
1027         WARN_ON(plist_node_empty(&q->list));
1028         plist_del(&q->list, &q->list.plist);
1029
1030         WARN_ON(!q->rt_waiter);
1031         q->rt_waiter = NULL;
1032
1033         wake_up_state(q->task, TASK_NORMAL);
1034 }
1035
1036 /**
1037  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1038  * @pifutex:            the user address of the to futex
1039  * @hb1:                the from futex hash bucket, must be locked by the caller
1040  * @hb2:                the to futex hash bucket, must be locked by the caller
1041  * @key1:               the from futex key
1042  * @key2:               the to futex key
1043  * @ps:                 address to store the pi_state pointer
1044  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1045  *
1046  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1047  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1048  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1049  * hb1 and hb2 must be held by the caller.
1050  *
1051  * Returns:
1052  *  0 - failed to acquire the lock atomicly
1053  *  1 - acquired the lock
1054  * <0 - error
1055  */
1056 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1057                                  struct futex_hash_bucket *hb1,
1058                                  struct futex_hash_bucket *hb2,
1059                                  union futex_key *key1, union futex_key *key2,
1060                                  struct futex_pi_state **ps, int set_waiters)
1061 {
1062         struct futex_q *top_waiter = NULL;
1063         u32 curval;
1064         int ret;
1065
1066         if (get_futex_value_locked(&curval, pifutex))
1067                 return -EFAULT;
1068
1069         /*
1070          * Find the top_waiter and determine if there are additional waiters.
1071          * If the caller intends to requeue more than 1 waiter to pifutex,
1072          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1073          * as we have means to handle the possible fault.  If not, don't set
1074          * the bit unecessarily as it will force the subsequent unlock to enter
1075          * the kernel.
1076          */
1077         top_waiter = futex_top_waiter(hb1, key1);
1078
1079         /* There are no waiters, nothing for us to do. */
1080         if (!top_waiter)
1081                 return 0;
1082
1083         /*
1084          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1085          * the contended case or if set_waiters is 1.  The pi_state is returned
1086          * in ps in contended cases.
1087          */
1088         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1089                                    set_waiters);
1090         if (ret == 1)
1091                 requeue_pi_wake_futex(top_waiter, key2);
1092
1093         return ret;
1094 }
1095
1096 /**
1097  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1098  * uaddr1:      source futex user address
1099  * uaddr2:      target futex user address
1100  * nr_wake:     number of waiters to wake (must be 1 for requeue_pi)
1101  * nr_requeue:  number of waiters to requeue (0-INT_MAX)
1102  * requeue_pi:  if we are attempting to requeue from a non-pi futex to a
1103  *              pi futex (pi to pi requeue is not supported)
1104  *
1105  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1106  * uaddr2 atomically on behalf of the top waiter.
1107  *
1108  * Returns:
1109  * >=0 - on success, the number of tasks requeued or woken
1110  *  <0 - on error
1111  */
1112 static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1113                          int nr_wake, int nr_requeue, u32 *cmpval,
1114                          int requeue_pi)
1115 {
1116         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1117         int drop_count = 0, task_count = 0, ret;
1118         struct futex_pi_state *pi_state = NULL;
1119         struct futex_hash_bucket *hb1, *hb2;
1120         struct plist_head *head1;
1121         struct futex_q *this, *next;
1122         u32 curval2;
1123
1124         if (requeue_pi) {
1125                 /*
1126                  * requeue_pi requires a pi_state, try to allocate it now
1127                  * without any locks in case it fails.
1128                  */
1129                 if (refill_pi_state_cache())
1130                         return -ENOMEM;
1131                 /*
1132                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1133                  * + nr_requeue, since it acquires the rt_mutex prior to
1134                  * returning to userspace, so as to not leave the rt_mutex with
1135                  * waiters and no owner.  However, second and third wake-ups
1136                  * cannot be predicted as they involve race conditions with the
1137                  * first wake and a fault while looking up the pi_state.  Both
1138                  * pthread_cond_signal() and pthread_cond_broadcast() should
1139                  * use nr_wake=1.
1140                  */
1141                 if (nr_wake != 1)
1142                         return -EINVAL;
1143         }
1144
1145 retry:
1146         if (pi_state != NULL) {
1147                 /*
1148                  * We will have to lookup the pi_state again, so free this one
1149                  * to keep the accounting correct.
1150                  */
1151                 free_pi_state(pi_state);
1152                 pi_state = NULL;
1153         }
1154
1155         ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1156         if (unlikely(ret != 0))
1157                 goto out;
1158         ret = get_futex_key(uaddr2, fshared, &key2,
1159                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1160         if (unlikely(ret != 0))
1161                 goto out_put_key1;
1162
1163         hb1 = hash_futex(&key1);
1164         hb2 = hash_futex(&key2);
1165
1166 retry_private:
1167         double_lock_hb(hb1, hb2);
1168
1169         if (likely(cmpval != NULL)) {
1170                 u32 curval;
1171
1172                 ret = get_futex_value_locked(&curval, uaddr1);
1173
1174                 if (unlikely(ret)) {
1175                         double_unlock_hb(hb1, hb2);
1176
1177                         ret = get_user(curval, uaddr1);
1178                         if (ret)
1179                                 goto out_put_keys;
1180
1181                         if (!fshared)
1182                                 goto retry_private;
1183
1184                         put_futex_key(fshared, &key2);
1185                         put_futex_key(fshared, &key1);
1186                         goto retry;
1187                 }
1188                 if (curval != *cmpval) {
1189                         ret = -EAGAIN;
1190                         goto out_unlock;
1191                 }
1192         }
1193
1194         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1195                 /*
1196                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1197                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1198                  * bit.  We force this here where we are able to easily handle
1199                  * faults rather in the requeue loop below.
1200                  */
1201                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1202                                                  &key2, &pi_state, nr_requeue);
1203
1204                 /*
1205                  * At this point the top_waiter has either taken uaddr2 or is
1206                  * waiting on it.  If the former, then the pi_state will not
1207                  * exist yet, look it up one more time to ensure we have a
1208                  * reference to it.
1209                  */
1210                 if (ret == 1) {
1211                         WARN_ON(pi_state);
1212                         task_count++;
1213                         ret = get_futex_value_locked(&curval2, uaddr2);
1214                         if (!ret)
1215                                 ret = lookup_pi_state(curval2, hb2, &key2,
1216                                                       &pi_state);
1217                 }
1218
1219                 switch (ret) {
1220                 case 0:
1221                         break;
1222                 case -EFAULT:
1223                         double_unlock_hb(hb1, hb2);
1224                         put_futex_key(fshared, &key2);
1225                         put_futex_key(fshared, &key1);
1226                         ret = fault_in_user_writeable(uaddr2);
1227                         if (!ret)
1228                                 goto retry;
1229                         goto out;
1230                 case -EAGAIN:
1231                         /* The owner was exiting, try again. */
1232                         double_unlock_hb(hb1, hb2);
1233                         put_futex_key(fshared, &key2);
1234                         put_futex_key(fshared, &key1);
1235                         cond_resched();
1236                         goto retry;
1237                 default:
1238                         goto out_unlock;
1239                 }
1240         }
1241
1242         head1 = &hb1->chain;
1243         plist_for_each_entry_safe(this, next, head1, list) {
1244                 if (task_count - nr_wake >= nr_requeue)
1245                         break;
1246
1247                 if (!match_futex(&this->key, &key1))
1248                         continue;
1249
1250                 WARN_ON(!requeue_pi && this->rt_waiter);
1251                 WARN_ON(requeue_pi && !this->rt_waiter);
1252
1253                 /*
1254                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1255                  * lock, we already woke the top_waiter.  If not, it will be
1256                  * woken by futex_unlock_pi().
1257                  */
1258                 if (++task_count <= nr_wake && !requeue_pi) {
1259                         wake_futex(this);
1260                         continue;
1261                 }
1262
1263                 /*
1264                  * Requeue nr_requeue waiters and possibly one more in the case
1265                  * of requeue_pi if we couldn't acquire the lock atomically.
1266                  */
1267                 if (requeue_pi) {
1268                         /* Prepare the waiter to take the rt_mutex. */
1269                         atomic_inc(&pi_state->refcount);
1270                         this->pi_state = pi_state;
1271                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1272                                                         this->rt_waiter,
1273                                                         this->task, 1);
1274                         if (ret == 1) {
1275                                 /* We got the lock. */
1276                                 requeue_pi_wake_futex(this, &key2);
1277                                 continue;
1278                         } else if (ret) {
1279                                 /* -EDEADLK */
1280                                 this->pi_state = NULL;
1281                                 free_pi_state(pi_state);
1282                                 goto out_unlock;
1283                         }
1284                 }
1285                 requeue_futex(this, hb1, hb2, &key2);
1286                 drop_count++;
1287         }
1288
1289 out_unlock:
1290         double_unlock_hb(hb1, hb2);
1291
1292         /*
1293          * drop_futex_key_refs() must be called outside the spinlocks. During
1294          * the requeue we moved futex_q's from the hash bucket at key1 to the
1295          * one at key2 and updated their key pointer.  We no longer need to
1296          * hold the references to key1.
1297          */
1298         while (--drop_count >= 0)
1299                 drop_futex_key_refs(&key1);
1300
1301 out_put_keys:
1302         put_futex_key(fshared, &key2);
1303 out_put_key1:
1304         put_futex_key(fshared, &key1);
1305 out:
1306         if (pi_state != NULL)
1307                 free_pi_state(pi_state);
1308         return ret ? ret : task_count;
1309 }
1310
1311 /* The key must be already stored in q->key. */
1312 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1313 {
1314         struct futex_hash_bucket *hb;
1315
1316         get_futex_key_refs(&q->key);
1317         hb = hash_futex(&q->key);
1318         q->lock_ptr = &hb->lock;
1319
1320         spin_lock(&hb->lock);
1321         return hb;
1322 }
1323
1324 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1325 {
1326         int prio;
1327
1328         /*
1329          * The priority used to register this element is
1330          * - either the real thread-priority for the real-time threads
1331          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1332          * - or MAX_RT_PRIO for non-RT threads.
1333          * Thus, all RT-threads are woken first in priority order, and
1334          * the others are woken last, in FIFO order.
1335          */
1336         prio = min(current->normal_prio, MAX_RT_PRIO);
1337
1338         plist_node_init(&q->list, prio);
1339 #ifdef CONFIG_DEBUG_PI_LIST
1340         q->list.plist.lock = &hb->lock;
1341 #endif
1342         plist_add(&q->list, &hb->chain);
1343         q->task = current;
1344         spin_unlock(&hb->lock);
1345 }
1346
1347 static inline void
1348 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1349 {
1350         spin_unlock(&hb->lock);
1351         drop_futex_key_refs(&q->key);
1352 }
1353
1354 /*
1355  * queue_me and unqueue_me must be called as a pair, each
1356  * exactly once.  They are called with the hashed spinlock held.
1357  */
1358
1359 /* Return 1 if we were still queued (ie. 0 means we were woken) */
1360 static int unqueue_me(struct futex_q *q)
1361 {
1362         spinlock_t *lock_ptr;
1363         int ret = 0;
1364
1365         /* In the common case we don't take the spinlock, which is nice. */
1366 retry:
1367         lock_ptr = q->lock_ptr;
1368         barrier();
1369         if (lock_ptr != NULL) {
1370                 spin_lock(lock_ptr);
1371                 /*
1372                  * q->lock_ptr can change between reading it and
1373                  * spin_lock(), causing us to take the wrong lock.  This
1374                  * corrects the race condition.
1375                  *
1376                  * Reasoning goes like this: if we have the wrong lock,
1377                  * q->lock_ptr must have changed (maybe several times)
1378                  * between reading it and the spin_lock().  It can
1379                  * change again after the spin_lock() but only if it was
1380                  * already changed before the spin_lock().  It cannot,
1381                  * however, change back to the original value.  Therefore
1382                  * we can detect whether we acquired the correct lock.
1383                  */
1384                 if (unlikely(lock_ptr != q->lock_ptr)) {
1385                         spin_unlock(lock_ptr);
1386                         goto retry;
1387                 }
1388                 WARN_ON(plist_node_empty(&q->list));
1389                 plist_del(&q->list, &q->list.plist);
1390
1391                 BUG_ON(q->pi_state);
1392
1393                 spin_unlock(lock_ptr);
1394                 ret = 1;
1395         }
1396
1397         drop_futex_key_refs(&q->key);
1398         return ret;
1399 }
1400
1401 /*
1402  * PI futexes can not be requeued and must remove themself from the
1403  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1404  * and dropped here.
1405  */
1406 static void unqueue_me_pi(struct futex_q *q)
1407 {
1408         WARN_ON(plist_node_empty(&q->list));
1409         plist_del(&q->list, &q->list.plist);
1410
1411         BUG_ON(!q->pi_state);
1412         free_pi_state(q->pi_state);
1413         q->pi_state = NULL;
1414
1415         spin_unlock(q->lock_ptr);
1416
1417         drop_futex_key_refs(&q->key);
1418 }
1419
1420 /*
1421  * Fixup the pi_state owner with the new owner.
1422  *
1423  * Must be called with hash bucket lock held and mm->sem held for non
1424  * private futexes.
1425  */
1426 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1427                                 struct task_struct *newowner, int fshared)
1428 {
1429         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1430         struct futex_pi_state *pi_state = q->pi_state;
1431         struct task_struct *oldowner = pi_state->owner;
1432         u32 uval, curval, newval;
1433         int ret;
1434
1435         /* Owner died? */
1436         if (!pi_state->owner)
1437                 newtid |= FUTEX_OWNER_DIED;
1438
1439         /*
1440          * We are here either because we stole the rtmutex from the
1441          * pending owner or we are the pending owner which failed to
1442          * get the rtmutex. We have to replace the pending owner TID
1443          * in the user space variable. This must be atomic as we have
1444          * to preserve the owner died bit here.
1445          *
1446          * Note: We write the user space value _before_ changing the pi_state
1447          * because we can fault here. Imagine swapped out pages or a fork
1448          * that marked all the anonymous memory readonly for cow.
1449          *
1450          * Modifying pi_state _before_ the user space value would
1451          * leave the pi_state in an inconsistent state when we fault
1452          * here, because we need to drop the hash bucket lock to
1453          * handle the fault. This might be observed in the PID check
1454          * in lookup_pi_state.
1455          */
1456 retry:
1457         if (get_futex_value_locked(&uval, uaddr))
1458                 goto handle_fault;
1459
1460         while (1) {
1461                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1462
1463                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1464
1465                 if (curval == -EFAULT)
1466                         goto handle_fault;
1467                 if (curval == uval)
1468                         break;
1469                 uval = curval;
1470         }
1471
1472         /*
1473          * We fixed up user space. Now we need to fix the pi_state
1474          * itself.
1475          */
1476         if (pi_state->owner != NULL) {
1477                 spin_lock_irq(&pi_state->owner->pi_lock);
1478                 WARN_ON(list_empty(&pi_state->list));
1479                 list_del_init(&pi_state->list);
1480                 spin_unlock_irq(&pi_state->owner->pi_lock);
1481         }
1482
1483         pi_state->owner = newowner;
1484
1485         spin_lock_irq(&newowner->pi_lock);
1486         WARN_ON(!list_empty(&pi_state->list));
1487         list_add(&pi_state->list, &newowner->pi_state_list);
1488         spin_unlock_irq(&newowner->pi_lock);
1489         return 0;
1490
1491         /*
1492          * To handle the page fault we need to drop the hash bucket
1493          * lock here. That gives the other task (either the pending
1494          * owner itself or the task which stole the rtmutex) the
1495          * chance to try the fixup of the pi_state. So once we are
1496          * back from handling the fault we need to check the pi_state
1497          * after reacquiring the hash bucket lock and before trying to
1498          * do another fixup. When the fixup has been done already we
1499          * simply return.
1500          */
1501 handle_fault:
1502         spin_unlock(q->lock_ptr);
1503
1504         ret = fault_in_user_writeable(uaddr);
1505
1506         spin_lock(q->lock_ptr);
1507
1508         /*
1509          * Check if someone else fixed it for us:
1510          */
1511         if (pi_state->owner != oldowner)
1512                 return 0;
1513
1514         if (ret)
1515                 return ret;
1516
1517         goto retry;
1518 }
1519
1520 /*
1521  * In case we must use restart_block to restart a futex_wait,
1522  * we encode in the 'flags' shared capability
1523  */
1524 #define FLAGS_SHARED            0x01
1525 #define FLAGS_CLOCKRT           0x02
1526 #define FLAGS_HAS_TIMEOUT       0x04
1527
1528 static long futex_wait_restart(struct restart_block *restart);
1529
1530 /**
1531  * fixup_owner() - Post lock pi_state and corner case management
1532  * @uaddr:      user address of the futex
1533  * @fshared:    whether the futex is shared (1) or not (0)
1534  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1535  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1536  *
1537  * After attempting to lock an rt_mutex, this function is called to cleanup
1538  * the pi_state owner as well as handle race conditions that may allow us to
1539  * acquire the lock. Must be called with the hb lock held.
1540  *
1541  * Returns:
1542  *  1 - success, lock taken
1543  *  0 - success, lock not taken
1544  * <0 - on error (-EFAULT)
1545  */
1546 static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
1547                        int locked)
1548 {
1549         struct task_struct *owner;
1550         int ret = 0;
1551
1552         if (locked) {
1553                 /*
1554                  * Got the lock. We might not be the anticipated owner if we
1555                  * did a lock-steal - fix up the PI-state in that case:
1556                  */
1557                 if (q->pi_state->owner != current)
1558                         ret = fixup_pi_state_owner(uaddr, q, current, fshared);
1559                 goto out;
1560         }
1561
1562         /*
1563          * Catch the rare case, where the lock was released when we were on the
1564          * way back before we locked the hash bucket.
1565          */
1566         if (q->pi_state->owner == current) {
1567                 /*
1568                  * Try to get the rt_mutex now. This might fail as some other
1569                  * task acquired the rt_mutex after we removed ourself from the
1570                  * rt_mutex waiters list.
1571                  */
1572                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1573                         locked = 1;
1574                         goto out;
1575                 }
1576
1577                 /*
1578                  * pi_state is incorrect, some other task did a lock steal and
1579                  * we returned due to timeout or signal without taking the
1580                  * rt_mutex. Too late. We can access the rt_mutex_owner without
1581                  * locking, as the other task is now blocked on the hash bucket
1582                  * lock. Fix the state up.
1583                  */
1584                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1585                 ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
1586                 goto out;
1587         }
1588
1589         /*
1590          * Paranoia check. If we did not take the lock, then we should not be
1591          * the owner, nor the pending owner, of the rt_mutex.
1592          */
1593         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1594                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1595                                 "pi-state %p\n", ret,
1596                                 q->pi_state->pi_mutex.owner,
1597                                 q->pi_state->owner);
1598
1599 out:
1600         return ret ? ret : locked;
1601 }
1602
1603 /**
1604  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1605  * @hb:         the futex hash bucket, must be locked by the caller
1606  * @q:          the futex_q to queue up on
1607  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1608  */
1609 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1610                                 struct hrtimer_sleeper *timeout)
1611 {
1612         queue_me(q, hb);
1613
1614         /*
1615          * There might have been scheduling since the queue_me(), as we
1616          * cannot hold a spinlock across the get_user() in case it
1617          * faults, and we cannot just set TASK_INTERRUPTIBLE state when
1618          * queueing ourselves into the futex hash. This code thus has to
1619          * rely on the futex_wake() code removing us from hash when it
1620          * wakes us up.
1621          */
1622         set_current_state(TASK_INTERRUPTIBLE);
1623
1624         /* Arm the timer */
1625         if (timeout) {
1626                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1627                 if (!hrtimer_active(&timeout->timer))
1628                         timeout->task = NULL;
1629         }
1630
1631         /*
1632          * !plist_node_empty() is safe here without any lock.
1633          * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1634          */
1635         if (likely(!plist_node_empty(&q->list))) {
1636                 /*
1637                  * If the timer has already expired, current will already be
1638                  * flagged for rescheduling. Only call schedule if there
1639                  * is no timeout, or if it has yet to expire.
1640                  */
1641                 if (!timeout || timeout->task)
1642                         schedule();
1643         }
1644         __set_current_state(TASK_RUNNING);
1645 }
1646
1647 /**
1648  * futex_wait_setup() - Prepare to wait on a futex
1649  * @uaddr:      the futex userspace address
1650  * @val:        the expected value
1651  * @fshared:    whether the futex is shared (1) or not (0)
1652  * @q:          the associated futex_q
1653  * @hb:         storage for hash_bucket pointer to be returned to caller
1654  *
1655  * Setup the futex_q and locate the hash_bucket.  Get the futex value and
1656  * compare it with the expected value.  Handle atomic faults internally.
1657  * Return with the hb lock held and a q.key reference on success, and unlocked
1658  * with no q.key reference on failure.
1659  *
1660  * Returns:
1661  *  0 - uaddr contains val and hb has been locked
1662  * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlcoked
1663  */
1664 static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
1665                            struct futex_q *q, struct futex_hash_bucket **hb)
1666 {
1667         u32 uval;
1668         int ret;
1669
1670         /*
1671          * Access the page AFTER the hash-bucket is locked.
1672          * Order is important:
1673          *
1674          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1675          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1676          *
1677          * The basic logical guarantee of a futex is that it blocks ONLY
1678          * if cond(var) is known to be true at the time of blocking, for
1679          * any cond.  If we queued after testing *uaddr, that would open
1680          * a race condition where we could block indefinitely with
1681          * cond(var) false, which would violate the guarantee.
1682          *
1683          * A consequence is that futex_wait() can return zero and absorb
1684          * a wakeup when *uaddr != val on entry to the syscall.  This is
1685          * rare, but normal.
1686          */
1687 retry:
1688         q->key = FUTEX_KEY_INIT;
1689         ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
1690         if (unlikely(ret != 0))
1691                 return ret;
1692
1693 retry_private:
1694         *hb = queue_lock(q);
1695
1696         ret = get_futex_value_locked(&uval, uaddr);
1697
1698         if (ret) {
1699                 queue_unlock(q, *hb);
1700
1701                 ret = get_user(uval, uaddr);
1702                 if (ret)
1703                         goto out;
1704
1705                 if (!fshared)
1706                         goto retry_private;
1707
1708                 put_futex_key(fshared, &q->key);
1709                 goto retry;
1710         }
1711
1712         if (uval != val) {
1713                 queue_unlock(q, *hb);
1714                 ret = -EWOULDBLOCK;
1715         }
1716
1717 out:
1718         if (ret)
1719                 put_futex_key(fshared, &q->key);
1720         return ret;
1721 }
1722
1723 static int futex_wait(u32 __user *uaddr, int fshared,
1724                       u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1725 {
1726         struct hrtimer_sleeper timeout, *to = NULL;
1727         struct restart_block *restart;
1728         struct futex_hash_bucket *hb;
1729         struct futex_q q;
1730         int ret;
1731
1732         if (!bitset)
1733                 return -EINVAL;
1734
1735         q.pi_state = NULL;
1736         q.bitset = bitset;
1737         q.rt_waiter = NULL;
1738
1739         if (abs_time) {
1740                 to = &timeout;
1741
1742                 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
1743                                       CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1744                 hrtimer_init_sleeper(to, current);
1745                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1746                                              current->timer_slack_ns);
1747         }
1748
1749         /* Prepare to wait on uaddr. */
1750         ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1751         if (ret)
1752                 goto out;
1753
1754         /* queue_me and wait for wakeup, timeout, or a signal. */
1755         futex_wait_queue_me(hb, &q, to);
1756
1757         /* If we were woken (and unqueued), we succeeded, whatever. */
1758         ret = 0;
1759         if (!unqueue_me(&q))
1760                 goto out_put_key;
1761         ret = -ETIMEDOUT;
1762         if (to && !to->task)
1763                 goto out_put_key;
1764
1765         /*
1766          * We expect signal_pending(current), but another thread may
1767          * have handled it for us already.
1768          */
1769         ret = -ERESTARTSYS;
1770         if (!abs_time)
1771                 goto out_put_key;
1772
1773         restart = &current_thread_info()->restart_block;
1774         restart->fn = futex_wait_restart;
1775         restart->futex.uaddr = (u32 *)uaddr;
1776         restart->futex.val = val;
1777         restart->futex.time = abs_time->tv64;
1778         restart->futex.bitset = bitset;
1779         restart->futex.flags = FLAGS_HAS_TIMEOUT;
1780
1781         if (fshared)
1782                 restart->futex.flags |= FLAGS_SHARED;
1783         if (clockrt)
1784                 restart->futex.flags |= FLAGS_CLOCKRT;
1785
1786         ret = -ERESTART_RESTARTBLOCK;
1787
1788 out_put_key:
1789         put_futex_key(fshared, &q.key);
1790 out:
1791         if (to) {
1792                 hrtimer_cancel(&to->timer);
1793                 destroy_hrtimer_on_stack(&to->timer);
1794         }
1795         return ret;
1796 }
1797
1798
1799 static long futex_wait_restart(struct restart_block *restart)
1800 {
1801         u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1802         int fshared = 0;
1803         ktime_t t, *tp = NULL;
1804
1805         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1806                 t.tv64 = restart->futex.time;
1807                 tp = &t;
1808         }
1809         restart->fn = do_no_restart_syscall;
1810         if (restart->futex.flags & FLAGS_SHARED)
1811                 fshared = 1;
1812         return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
1813                                 restart->futex.bitset,
1814                                 restart->futex.flags & FLAGS_CLOCKRT);
1815 }
1816
1817
1818 /*
1819  * Userspace tried a 0 -> TID atomic transition of the futex value
1820  * and failed. The kernel side here does the whole locking operation:
1821  * if there are waiters then it will block, it does PI, etc. (Due to
1822  * races the kernel might see a 0 value of the futex too.)
1823  */
1824 static int futex_lock_pi(u32 __user *uaddr, int fshared,
1825                          int detect, ktime_t *time, int trylock)
1826 {
1827         struct hrtimer_sleeper timeout, *to = NULL;
1828         struct futex_hash_bucket *hb;
1829         struct futex_q q;
1830         int res, ret;
1831
1832         if (refill_pi_state_cache())
1833                 return -ENOMEM;
1834
1835         if (time) {
1836                 to = &timeout;
1837                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1838                                       HRTIMER_MODE_ABS);
1839                 hrtimer_init_sleeper(to, current);
1840                 hrtimer_set_expires(&to->timer, *time);
1841         }
1842
1843         q.pi_state = NULL;
1844         q.rt_waiter = NULL;
1845 retry:
1846         q.key = FUTEX_KEY_INIT;
1847         ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
1848         if (unlikely(ret != 0))
1849                 goto out;
1850
1851 retry_private:
1852         hb = queue_lock(&q);
1853
1854         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1855         if (unlikely(ret)) {
1856                 switch (ret) {
1857                 case 1:
1858                         /* We got the lock. */
1859                         ret = 0;
1860                         goto out_unlock_put_key;
1861                 case -EFAULT:
1862                         goto uaddr_faulted;
1863                 case -EAGAIN:
1864                         /*
1865                          * Task is exiting and we just wait for the
1866                          * exit to complete.
1867                          */
1868                         queue_unlock(&q, hb);
1869                         put_futex_key(fshared, &q.key);
1870                         cond_resched();
1871                         goto retry;
1872                 default:
1873                         goto out_unlock_put_key;
1874                 }
1875         }
1876
1877         /*
1878          * Only actually queue now that the atomic ops are done:
1879          */
1880         queue_me(&q, hb);
1881
1882         WARN_ON(!q.pi_state);
1883         /*
1884          * Block on the PI mutex:
1885          */
1886         if (!trylock)
1887                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1888         else {
1889                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1890                 /* Fixup the trylock return value: */
1891                 ret = ret ? 0 : -EWOULDBLOCK;
1892         }
1893
1894         spin_lock(q.lock_ptr);
1895         /*
1896          * Fixup the pi_state owner and possibly acquire the lock if we
1897          * haven't already.
1898          */
1899         res = fixup_owner(uaddr, fshared, &q, !ret);
1900         /*
1901          * If fixup_owner() returned an error, proprogate that.  If it acquired
1902          * the lock, clear our -ETIMEDOUT or -EINTR.
1903          */
1904         if (res)
1905                 ret = (res < 0) ? res : 0;
1906
1907         /*
1908          * If fixup_owner() faulted and was unable to handle the fault, unlock
1909          * it and return the fault to userspace.
1910          */
1911         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1912                 rt_mutex_unlock(&q.pi_state->pi_mutex);
1913
1914         /* Unqueue and drop the lock */
1915         unqueue_me_pi(&q);
1916
1917         goto out;
1918
1919 out_unlock_put_key:
1920         queue_unlock(&q, hb);
1921
1922 out_put_key:
1923         put_futex_key(fshared, &q.key);
1924 out:
1925         if (to)
1926                 destroy_hrtimer_on_stack(&to->timer);
1927         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1928
1929 uaddr_faulted:
1930         queue_unlock(&q, hb);
1931
1932         ret = fault_in_user_writeable(uaddr);
1933         if (ret)
1934                 goto out_put_key;
1935
1936         if (!fshared)
1937                 goto retry_private;
1938
1939         put_futex_key(fshared, &q.key);
1940         goto retry;
1941 }
1942
1943 /*
1944  * Userspace attempted a TID -> 0 atomic transition, and failed.
1945  * This is the in-kernel slowpath: we look up the PI state (if any),
1946  * and do the rt-mutex unlock.
1947  */
1948 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1949 {
1950         struct futex_hash_bucket *hb;
1951         struct futex_q *this, *next;
1952         u32 uval;
1953         struct plist_head *head;
1954         union futex_key key = FUTEX_KEY_INIT;
1955         int ret;
1956
1957 retry:
1958         if (get_user(uval, uaddr))
1959                 return -EFAULT;
1960         /*
1961          * We release only a lock we actually own:
1962          */
1963         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1964                 return -EPERM;
1965
1966         ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
1967         if (unlikely(ret != 0))
1968                 goto out;
1969
1970         hb = hash_futex(&key);
1971         spin_lock(&hb->lock);
1972
1973         /*
1974          * To avoid races, try to do the TID -> 0 atomic transition
1975          * again. If it succeeds then we can return without waking
1976          * anyone else up:
1977          */
1978         if (!(uval & FUTEX_OWNER_DIED))
1979                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
1980
1981
1982         if (unlikely(uval == -EFAULT))
1983                 goto pi_faulted;
1984         /*
1985          * Rare case: we managed to release the lock atomically,
1986          * no need to wake anyone else up:
1987          */
1988         if (unlikely(uval == task_pid_vnr(current)))
1989                 goto out_unlock;
1990
1991         /*
1992          * Ok, other tasks may need to be woken up - check waiters
1993          * and do the wakeup if necessary:
1994          */
1995         head = &hb->chain;
1996
1997         plist_for_each_entry_safe(this, next, head, list) {
1998                 if (!match_futex (&this->key, &key))
1999                         continue;
2000                 ret = wake_futex_pi(uaddr, uval, this);
2001                 /*
2002                  * The atomic access to the futex value
2003                  * generated a pagefault, so retry the
2004                  * user-access and the wakeup:
2005                  */
2006                 if (ret == -EFAULT)
2007                         goto pi_faulted;
2008                 goto out_unlock;
2009         }
2010         /*
2011          * No waiters - kernel unlocks the futex:
2012          */
2013         if (!(uval & FUTEX_OWNER_DIED)) {
2014                 ret = unlock_futex_pi(uaddr, uval);
2015                 if (ret == -EFAULT)
2016                         goto pi_faulted;
2017         }
2018
2019 out_unlock:
2020         spin_unlock(&hb->lock);
2021         put_futex_key(fshared, &key);
2022
2023 out:
2024         return ret;
2025
2026 pi_faulted:
2027         spin_unlock(&hb->lock);
2028         put_futex_key(fshared, &key);
2029
2030         ret = fault_in_user_writeable(uaddr);
2031         if (!ret)
2032                 goto retry;
2033
2034         return ret;
2035 }
2036
2037 /**
2038  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2039  * @hb:         the hash_bucket futex_q was original enqueued on
2040  * @q:          the futex_q woken while waiting to be requeued
2041  * @key2:       the futex_key of the requeue target futex
2042  * @timeout:    the timeout associated with the wait (NULL if none)
2043  *
2044  * Detect if the task was woken on the initial futex as opposed to the requeue
2045  * target futex.  If so, determine if it was a timeout or a signal that caused
2046  * the wakeup and return the appropriate error code to the caller.  Must be
2047  * called with the hb lock held.
2048  *
2049  * Returns
2050  *  0 - no early wakeup detected
2051  * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2052  */
2053 static inline
2054 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2055                                    struct futex_q *q, union futex_key *key2,
2056                                    struct hrtimer_sleeper *timeout)
2057 {
2058         int ret = 0;
2059
2060         /*
2061          * With the hb lock held, we avoid races while we process the wakeup.
2062          * We only need to hold hb (and not hb2) to ensure atomicity as the
2063          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2064          * It can't be requeued from uaddr2 to something else since we don't
2065          * support a PI aware source futex for requeue.
2066          */
2067         if (!match_futex(&q->key, key2)) {
2068                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2069                 /*
2070                  * We were woken prior to requeue by a timeout or a signal.
2071                  * Unqueue the futex_q and determine which it was.
2072                  */
2073                 plist_del(&q->list, &q->list.plist);
2074                 drop_futex_key_refs(&q->key);
2075
2076                 if (timeout && !timeout->task)
2077                         ret = -ETIMEDOUT;
2078                 else
2079                         ret = -ERESTARTNOINTR;
2080         }
2081         return ret;
2082 }
2083
2084 /**
2085  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2086  * @uaddr:      the futex we initialyl wait on (non-pi)
2087  * @fshared:    whether the futexes are shared (1) or not (0).  They must be
2088  *              the same type, no requeueing from private to shared, etc.
2089  * @val:        the expected value of uaddr
2090  * @abs_time:   absolute timeout
2091  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all.
2092  * @clockrt:    whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2093  * @uaddr2:     the pi futex we will take prior to returning to user-space
2094  *
2095  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2096  * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
2097  * complete the acquisition of the rt_mutex prior to returning to userspace.
2098  * This ensures the rt_mutex maintains an owner when it has waiters; without
2099  * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2100  * need to.
2101  *
2102  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2103  * via the following:
2104  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2105  * 2) wakeup on uaddr2 after a requeue and subsequent unlock
2106  * 3) signal (before or after requeue)
2107  * 4) timeout (before or after requeue)
2108  *
2109  * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
2110  *
2111  * If 2, we may then block on trying to take the rt_mutex and return via:
2112  * 5) successful lock
2113  * 6) signal
2114  * 7) timeout
2115  * 8) other lock acquisition failure
2116  *
2117  * If 6, we setup a restart_block with futex_lock_pi() as the function.
2118  *
2119  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2120  *
2121  * Returns:
2122  *  0 - On success
2123  * <0 - On error
2124  */
2125 static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2126                                  u32 val, ktime_t *abs_time, u32 bitset,
2127                                  int clockrt, u32 __user *uaddr2)
2128 {
2129         struct hrtimer_sleeper timeout, *to = NULL;
2130         struct rt_mutex_waiter rt_waiter;
2131         struct rt_mutex *pi_mutex = NULL;
2132         struct futex_hash_bucket *hb;
2133         union futex_key key2;
2134         struct futex_q q;
2135         int res, ret;
2136
2137         if (!bitset)
2138                 return -EINVAL;
2139
2140         if (abs_time) {
2141                 to = &timeout;
2142                 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
2143                                       CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2144                 hrtimer_init_sleeper(to, current);
2145                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2146                                              current->timer_slack_ns);
2147         }
2148
2149         /*
2150          * The waiter is allocated on our stack, manipulated by the requeue
2151          * code while we sleep on uaddr.
2152          */
2153         debug_rt_mutex_init_waiter(&rt_waiter);
2154         rt_waiter.task = NULL;
2155
2156         q.pi_state = NULL;
2157         q.bitset = bitset;
2158         q.rt_waiter = &rt_waiter;
2159
2160         key2 = FUTEX_KEY_INIT;
2161         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
2162         if (unlikely(ret != 0))
2163                 goto out;
2164
2165         /* Prepare to wait on uaddr. */
2166         ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
2167         if (ret)
2168                 goto out_key2;
2169
2170         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2171         futex_wait_queue_me(hb, &q, to);
2172
2173         spin_lock(&hb->lock);
2174         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2175         spin_unlock(&hb->lock);
2176         if (ret)
2177                 goto out_put_keys;
2178
2179         /*
2180          * In order for us to be here, we know our q.key == key2, and since
2181          * we took the hb->lock above, we also know that futex_requeue() has
2182          * completed and we no longer have to concern ourselves with a wakeup
2183          * race with the atomic proxy lock acquition by the requeue code.
2184          */
2185
2186         /* Check if the requeue code acquired the second futex for us. */
2187         if (!q.rt_waiter) {
2188                 /*
2189                  * Got the lock. We might not be the anticipated owner if we
2190                  * did a lock-steal - fix up the PI-state in that case.
2191                  */
2192                 if (q.pi_state && (q.pi_state->owner != current)) {
2193                         spin_lock(q.lock_ptr);
2194                         ret = fixup_pi_state_owner(uaddr2, &q, current,
2195                                                    fshared);
2196                         spin_unlock(q.lock_ptr);
2197                 }
2198         } else {
2199                 /*
2200                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2201                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2202                  * the pi_state.
2203                  */
2204                 WARN_ON(!&q.pi_state);
2205                 pi_mutex = &q.pi_state->pi_mutex;
2206                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2207                 debug_rt_mutex_free_waiter(&rt_waiter);
2208
2209                 spin_lock(q.lock_ptr);
2210                 /*
2211                  * Fixup the pi_state owner and possibly acquire the lock if we
2212                  * haven't already.
2213                  */
2214                 res = fixup_owner(uaddr2, fshared, &q, !ret);
2215                 /*
2216                  * If fixup_owner() returned an error, proprogate that.  If it
2217                  * acquired the lock, clear our -ETIMEDOUT or -EINTR.
2218                  */
2219                 if (res)
2220                         ret = (res < 0) ? res : 0;
2221
2222                 /* Unqueue and drop the lock. */
2223                 unqueue_me_pi(&q);
2224         }
2225
2226         /*
2227          * If fixup_pi_state_owner() faulted and was unable to handle the
2228          * fault, unlock the rt_mutex and return the fault to userspace.
2229          */
2230         if (ret == -EFAULT) {
2231                 if (rt_mutex_owner(pi_mutex) == current)
2232                         rt_mutex_unlock(pi_mutex);
2233         } else if (ret == -EINTR) {
2234                 /*
2235                  * We've already been requeued, but we have no way to
2236                  * restart by calling futex_lock_pi() directly. We
2237                  * could restart the syscall, but that will look at
2238                  * the user space value and return right away. So we
2239                  * drop back with EWOULDBLOCK to tell user space that
2240                  * "val" has been changed. That's the same what the
2241                  * restart of the syscall would do in
2242                  * futex_wait_setup().
2243                  */
2244                 ret = -EWOULDBLOCK;
2245         }
2246
2247 out_put_keys:
2248         put_futex_key(fshared, &q.key);
2249 out_key2:
2250         put_futex_key(fshared, &key2);
2251
2252 out:
2253         if (to) {
2254                 hrtimer_cancel(&to->timer);
2255                 destroy_hrtimer_on_stack(&to->timer);
2256         }
2257         return ret;
2258 }
2259
2260 /*
2261  * Support for robust futexes: the kernel cleans up held futexes at
2262  * thread exit time.
2263  *
2264  * Implementation: user-space maintains a per-thread list of locks it
2265  * is holding. Upon do_exit(), the kernel carefully walks this list,
2266  * and marks all locks that are owned by this thread with the
2267  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2268  * always manipulated with the lock held, so the list is private and
2269  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2270  * field, to allow the kernel to clean up if the thread dies after
2271  * acquiring the lock, but just before it could have added itself to
2272  * the list. There can only be one such pending lock.
2273  */
2274
2275 /**
2276  * sys_set_robust_list - set the robust-futex list head of a task
2277  * @head: pointer to the list-head
2278  * @len: length of the list-head, as userspace expects
2279  */
2280 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2281                 size_t, len)
2282 {
2283         if (!futex_cmpxchg_enabled)
2284                 return -ENOSYS;
2285         /*
2286          * The kernel knows only one size for now:
2287          */
2288         if (unlikely(len != sizeof(*head)))
2289                 return -EINVAL;
2290
2291         current->robust_list = head;
2292
2293         return 0;
2294 }
2295
2296 /**
2297  * sys_get_robust_list - get the robust-futex list head of a task
2298  * @pid: pid of the process [zero for current task]
2299  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2300  * @len_ptr: pointer to a length field, the kernel fills in the header size
2301  */
2302 SYSCALL_DEFINE3(get_robust_list, int, pid,
2303                 struct robust_list_head __user * __user *, head_ptr,
2304                 size_t __user *, len_ptr)
2305 {
2306         struct robust_list_head __user *head;
2307         unsigned long ret;
2308         const struct cred *cred = current_cred(), *pcred;
2309
2310         if (!futex_cmpxchg_enabled)
2311                 return -ENOSYS;
2312
2313         if (!pid)
2314                 head = current->robust_list;
2315         else {
2316                 struct task_struct *p;
2317
2318                 ret = -ESRCH;
2319                 rcu_read_lock();
2320                 p = find_task_by_vpid(pid);
2321                 if (!p)
2322                         goto err_unlock;
2323                 ret = -EPERM;
2324                 pcred = __task_cred(p);
2325                 if (cred->euid != pcred->euid &&
2326                     cred->euid != pcred->uid &&
2327                     !capable(CAP_SYS_PTRACE))
2328                         goto err_unlock;
2329                 head = p->robust_list;
2330                 rcu_read_unlock();
2331         }
2332
2333         if (put_user(sizeof(*head), len_ptr))
2334                 return -EFAULT;
2335         return put_user(head, head_ptr);
2336
2337 err_unlock:
2338         rcu_read_unlock();
2339
2340         return ret;
2341 }
2342
2343 /*
2344  * Process a futex-list entry, check whether it's owned by the
2345  * dying task, and do notification if so:
2346  */
2347 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2348 {
2349         u32 uval, nval, mval;
2350
2351 retry:
2352         if (get_user(uval, uaddr))
2353                 return -1;
2354
2355         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2356                 /*
2357                  * Ok, this dying thread is truly holding a futex
2358                  * of interest. Set the OWNER_DIED bit atomically
2359                  * via cmpxchg, and if the value had FUTEX_WAITERS
2360                  * set, wake up a waiter (if any). (We have to do a
2361                  * futex_wake() even if OWNER_DIED is already set -
2362                  * to handle the rare but possible case of recursive
2363                  * thread-death.) The rest of the cleanup is done in
2364                  * userspace.
2365                  */
2366                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2367                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2368
2369                 if (nval == -EFAULT)
2370                         return -1;
2371
2372                 if (nval != uval)
2373                         goto retry;
2374
2375                 /*
2376                  * Wake robust non-PI futexes here. The wakeup of
2377                  * PI futexes happens in exit_pi_state():
2378                  */
2379                 if (!pi && (uval & FUTEX_WAITERS))
2380                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2381         }
2382         return 0;
2383 }
2384
2385 /*
2386  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2387  */
2388 static inline int fetch_robust_entry(struct robust_list __user **entry,
2389                                      struct robust_list __user * __user *head,
2390                                      int *pi)
2391 {
2392         unsigned long uentry;
2393
2394         if (get_user(uentry, (unsigned long __user *)head))
2395                 return -EFAULT;
2396
2397         *entry = (void __user *)(uentry & ~1UL);
2398         *pi = uentry & 1;
2399
2400         return 0;
2401 }
2402
2403 /*
2404  * Walk curr->robust_list (very carefully, it's a userspace list!)
2405  * and mark any locks found there dead, and notify any waiters.
2406  *
2407  * We silently return on any sign of list-walking problem.
2408  */
2409 void exit_robust_list(struct task_struct *curr)
2410 {
2411         struct robust_list_head __user *head = curr->robust_list;
2412         struct robust_list __user *entry, *next_entry, *pending;
2413         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
2414         unsigned long futex_offset;
2415         int rc;
2416
2417         if (!futex_cmpxchg_enabled)
2418                 return;
2419
2420         /*
2421          * Fetch the list head (which was registered earlier, via
2422          * sys_set_robust_list()):
2423          */
2424         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2425                 return;
2426         /*
2427          * Fetch the relative futex offset:
2428          */
2429         if (get_user(futex_offset, &head->futex_offset))
2430                 return;
2431         /*
2432          * Fetch any possibly pending lock-add first, and handle it
2433          * if it exists:
2434          */
2435         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2436                 return;
2437
2438         next_entry = NULL;      /* avoid warning with gcc */
2439         while (entry != &head->list) {
2440                 /*
2441                  * Fetch the next entry in the list before calling
2442                  * handle_futex_death:
2443                  */
2444                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2445                 /*
2446                  * A pending lock might already be on the list, so
2447                  * don't process it twice:
2448                  */
2449                 if (entry != pending)
2450                         if (handle_futex_death((void __user *)entry + futex_offset,
2451                                                 curr, pi))
2452                                 return;
2453                 if (rc)
2454                         return;
2455                 entry = next_entry;
2456                 pi = next_pi;
2457                 /*
2458                  * Avoid excessively long or circular lists:
2459                  */
2460                 if (!--limit)
2461                         break;
2462
2463                 cond_resched();
2464         }
2465
2466         if (pending)
2467                 handle_futex_death((void __user *)pending + futex_offset,
2468                                    curr, pip);
2469 }
2470
2471 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2472                 u32 __user *uaddr2, u32 val2, u32 val3)
2473 {
2474         int clockrt, ret = -ENOSYS;
2475         int cmd = op & FUTEX_CMD_MASK;
2476         int fshared = 0;
2477
2478         if (!(op & FUTEX_PRIVATE_FLAG))
2479                 fshared = 1;
2480
2481         clockrt = op & FUTEX_CLOCK_REALTIME;
2482         if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2483                 return -ENOSYS;
2484
2485         switch (cmd) {
2486         case FUTEX_WAIT:
2487                 val3 = FUTEX_BITSET_MATCH_ANY;
2488         case FUTEX_WAIT_BITSET:
2489                 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
2490                 break;
2491         case FUTEX_WAKE:
2492                 val3 = FUTEX_BITSET_MATCH_ANY;
2493         case FUTEX_WAKE_BITSET:
2494                 ret = futex_wake(uaddr, fshared, val, val3);
2495                 break;
2496         case FUTEX_REQUEUE:
2497                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
2498                 break;
2499         case FUTEX_CMP_REQUEUE:
2500                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2501                                     0);
2502                 break;
2503         case FUTEX_WAKE_OP:
2504                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2505                 break;
2506         case FUTEX_LOCK_PI:
2507                 if (futex_cmpxchg_enabled)
2508                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2509                 break;
2510         case FUTEX_UNLOCK_PI:
2511                 if (futex_cmpxchg_enabled)
2512                         ret = futex_unlock_pi(uaddr, fshared);
2513                 break;
2514         case FUTEX_TRYLOCK_PI:
2515                 if (futex_cmpxchg_enabled)
2516                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2517                 break;
2518         case FUTEX_WAIT_REQUEUE_PI:
2519                 val3 = FUTEX_BITSET_MATCH_ANY;
2520                 ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
2521                                             clockrt, uaddr2);
2522                 break;
2523         case FUTEX_CMP_REQUEUE_PI:
2524                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2525                                     1);
2526                 break;
2527         default:
2528                 ret = -ENOSYS;
2529         }
2530         return ret;
2531 }
2532
2533
2534 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2535                 struct timespec __user *, utime, u32 __user *, uaddr2,
2536                 u32, val3)
2537 {
2538         struct timespec ts;
2539         ktime_t t, *tp = NULL;
2540         u32 val2 = 0;
2541         int cmd = op & FUTEX_CMD_MASK;
2542
2543         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2544                       cmd == FUTEX_WAIT_BITSET ||
2545                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2546                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2547                         return -EFAULT;
2548                 if (!timespec_valid(&ts))
2549                         return -EINVAL;
2550
2551                 t = timespec_to_ktime(ts);
2552                 if (cmd == FUTEX_WAIT)
2553                         t = ktime_add_safe(ktime_get(), t);
2554                 tp = &t;
2555         }
2556         /*
2557          * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
2558          * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
2559          */
2560         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2561             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2562                 val2 = (u32) (unsigned long) utime;
2563
2564         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2565 }
2566
2567 static int __init futex_init(void)
2568 {
2569         u32 curval;
2570         int i;
2571
2572         /*
2573          * This will fail and we want it. Some arch implementations do
2574          * runtime detection of the futex_atomic_cmpxchg_inatomic()
2575          * functionality. We want to know that before we call in any
2576          * of the complex code paths. Also we want to prevent
2577          * registration of robust lists in that case. NULL is
2578          * guaranteed to fault and we get -EFAULT on functional
2579          * implementation, the non functional ones will return
2580          * -ENOSYS.
2581          */
2582         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2583         if (curval == -EFAULT)
2584                 futex_cmpxchg_enabled = 1;
2585
2586         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2587                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2588                 spin_lock_init(&futex_queues[i].lock);
2589         }
2590
2591         return 0;
2592 }
2593 __initcall(futex_init);