/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bits 0 and 1
 * are used to keep track of the "owner is pending" and "lock has
 * waiters" state.
 *
 * owner        bit1    bit0
 * NULL         0       0       lock is free (fast acquire possible)
 * NULL         0       1       invalid state
 * NULL         1       0       transitional state*
 * NULL         1       1       invalid state
 * taskpointer  0       0       lock is held (fast release possible)
 * taskpointer  0       1       task is pending owner
 * taskpointer  1       0       lock is held and has waiters
 * taskpointer  1       1       task is pending owner and lock has more waiters
 *
 * Pending ownership is assigned to the top (highest priority) waiter
 * of the lock when the lock is released. The thread is woken up and
 * can now take the lock. Until the lock is taken (bit 0 cleared), a
 * competing higher priority thread can steal the lock, which puts the
 * woken up thread back on the waiters list.
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bits 0 and 1 of lock->owner are 0.
 *
 * (*) There is a small window where the owner can be NULL and the
 * "lock has waiters" bit is set. This can happen when grabbing the lock.
 * To prevent a cmpxchg of the owner releasing the lock, we need to set
 * this bit before looking at the lock, hence the reason this is a
 * transitional state.
 */
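
/*
 * Illustrative sketch (an addition, not part of the original file):
 * decoding the owner word according to the table above, assuming the
 * bit definitions from rtmutex_common.h (RT_MUTEX_OWNER_PENDING,
 * RT_MUTEX_HAS_WAITERS, RT_MUTEX_OWNER_MASKALL). The helper name is
 * hypothetical.
 *
 *	static void example_decode_owner(struct rt_mutex *lock)
 *	{
 *		unsigned long val = (unsigned long)lock->owner;
 *		struct task_struct *owner;
 *		int pending, waiters;
 *
 *		owner = (struct task_struct *)
 *				(val & ~RT_MUTEX_OWNER_MASKALL);
 *		pending = !!(val & RT_MUTEX_OWNER_PENDING);
 *		waiters = !!(val & RT_MUTEX_HAS_WAITERS);
 *	}
 *
 * owner == NULL with both bits clear is the only state in which the
 * cmpxchg fast path below can succeed.
 */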

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
                   unsigned long mask)
{
        unsigned long val = (unsigned long)owner | mask;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)        (cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)        (0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
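
/*
 * Illustrative sketch (an addition): with the cmpxchg fast path
 * available, an uncontended acquire and release are single atomic
 * transitions of lock->owner:
 *
 *	if (rt_mutex_cmpxchg(lock, NULL, current))
 *		return;		(fast acquire, wait_lock never taken)
 *
 *	if (rt_mutex_cmpxchg(lock, current, NULL))
 *		return;		(fast release)
 *
 * Both fail as soon as bit 0 or bit 1 of lock->owner is set, which
 * forces the slow path under lock->wait_lock.
 */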

/*
 * Calculate task priority from the waiter list priority
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
        if (likely(!task_has_pi_waiters(task)))
                return task->normal_prio;

        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
                   task->normal_prio);
}
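
/*
 * Worked example (an addition; lower numeric priority values mean
 * higher priority in the kernel): a SCHED_NORMAL task with normal_prio
 * 120 that owns a lock with a SCHED_FIFO waiter of prio 95 gets
 * min(95, 120) == 95, i.e. it runs boosted at the waiter's priority.
 * Once the waiter is gone, rt_mutex_getprio() returns 120 again and
 * the boost is undone.
 */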

/*
 * Adjust the priority of a task, after its pi_waiters list has been
 * modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
        int prio = rt_mutex_getprio(task);

        if (task->prio != prio)
                rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);
        __rt_mutex_adjust_prio(task);
        spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
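
/*
 * Note (an addition): max_lock_depth is exposed as a sysctl, so the
 * limit can be raised at runtime if a workload legitimately builds
 * deeper boosting chains, e.g.:
 *
 *	echo 2048 > /proc/sys/kernel/max_lock_depth
 *
 * (Assuming the max_lock_depth entry in kernel/sysctl.c is present in
 * this tree.)
 */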

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage count by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        int detect_deadlock, ret = 0, depth = 0;
        unsigned long flags;

        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
                                                         deadlock_detect);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold
         * a maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, top_task->pid);
                }
                put_task_struct(task);

                return deadlock_detect ? -EDEADLK : 0;
        }
 retry:
        /*
         * The task cannot go away, as we hold a reference from
         * get_task_struct()!
         */
        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter || !waiter->task)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock
         * and made us the pending owner:
         */
        if (orig_waiter && !orig_waiter->task)
                goto out_unlock_pi;

        /*
         * Drop out when the task has no waiters. Note that top_waiter
         * can be NULL when we are in deboosting mode!
         */
        if (top_waiter && (!task_has_pi_waiters(task) ||
                           top_waiter != task_top_pi_waiter(task)))
                goto out_unlock_pi;

        /*
         * When deadlock detection is off, check whether further
         * priority adjustment is necessary.
         */
        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
                goto out_unlock_pi;

        lock = waiter->lock;
        if (!spin_trylock(&lock->wait_lock)) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
                goto retry;
        }

        /* Deadlock detection */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
                goto out_unlock_pi;
        }

        top_waiter = rt_mutex_top_waiter(lock);

        /* Requeue the waiter */
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->list_entry.prio = task->prio;
        plist_add(&waiter->list_entry, &lock->wait_list);

        /* Release the task */
        spin_unlock_irqrestore(&task->pi_lock, flags);
        put_task_struct(task);

        /* Grab the next task */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        spin_lock_irqsave(&task->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                /* Boost the owner */
                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);

        } else if (top_waiter == waiter) {
                /* Deboost the owner */
                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
                waiter = rt_mutex_top_waiter(lock);
                waiter->pi_list_entry.prio = waiter->list_entry.prio;
                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
                __rt_mutex_adjust_prio(task);
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        top_waiter = rt_mutex_top_waiter(lock);
        spin_unlock(&lock->wait_lock);

        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
        put_task_struct(task);

        return ret;
}
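
/*
 * Example scenario (an addition): task A (prio 90) blocks on lock L1,
 * owned by task B (prio 100); B in turn is blocked on lock L2, owned
 * by task C (prio 110). The chain walk starting at B proceeds
 *
 *	B: requeue A's waiter on L1, boost B to prio 90
 *	C: requeue B's waiter on L2, boost C to prio 90
 *
 * and stops when it finds a task that is not blocked on anything or
 * when no further priority adjustment is needed. With deadlock
 * detection on, revisiting orig_lock during the walk returns -EDEADLK.
 */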

/*
 * Optimization: check if we can steal the lock from the
 * assigned pending owner [which might not have taken the
 * lock yet]:
 */
static inline int try_to_steal_lock(struct rt_mutex *lock)
{
        struct task_struct *pendowner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *next;
        unsigned long flags;

        if (!rt_mutex_owner_pending(lock))
                return 0;

        if (pendowner == current)
                return 1;

        spin_lock_irqsave(&pendowner->pi_lock, flags);
        if (current->prio >= pendowner->prio) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 0;
        }

        /*
         * Check if a waiter is enqueued on the pending owner's
         * pi_waiters list. Remove it and readjust the pending owner's
         * priority.
         */
        if (likely(!rt_mutex_has_waiters(lock))) {
                spin_unlock_irqrestore(&pendowner->pi_lock, flags);
                return 1;
        }

        /* No chain handling, pending owner is not blocked on anything: */
        next = rt_mutex_top_waiter(lock);
        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
        __rt_mutex_adjust_prio(pendowner);
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        /*
         * We are going to steal the lock and a waiter was
         * enqueued on the pending owner's pi_waiters queue. So
         * we have to enqueue this waiter into the
         * current->pi_waiters list. This covers the case where
         * current is boosted because it holds another lock and
         * gets unboosted because the booster is interrupted, so
         * we would delay a waiter with higher priority than
         * current->normal_prio.
         *
         * Note: in the rare case of a SCHED_OTHER task changing
         * its priority and thus stealing the lock, next->task
         * might be current:
         */
        if (likely(next->task != current)) {
                spin_lock_irqsave(&current->pi_lock, flags);
                plist_add(&next->pi_list_entry, &current->pi_waiters);
                __rt_mutex_adjust_prio(current);
                spin_unlock_irqrestore(&current->pi_lock, flags);
        }
        return 1;
}
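
/*
 * Example (an addition): the lock was just released and pending
 * ownership was handed to a woken waiter of prio 100. Before that
 * waiter actually takes the lock, a prio-90 task enters the slow path;
 * since 90 < 100 means higher priority, try_to_steal_lock() succeeds
 * and the woken prio-100 task must re-enqueue itself as a waiter.
 */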

/*
 * Try to take an rt-mutex
 *
 * This fails
 * - when the lock has a real owner
 * - when a different pending owner exists and has higher priority than current
 *
 * Must be called with lock->wait_lock held.
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock)
{
        /*
         * We have to be careful here if the atomic speedups are
         * enabled, such that, when
         *  - no other waiter is on the lock
         *  - the lock has been released since we did the cmpxchg
         * the lock can be released or taken while we are doing the
         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
         *
         * The atomic acquire/release aware variant of
         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
         * the WAITERS bit, the atomic release/acquire can not
         * happen anymore and lock->wait_lock protects us from the
         * non-atomic case.
         *
         * Note that this might set lock->owner =
         * RT_MUTEX_HAS_WAITERS even when the lock is not contended
         * any more. This is fixed up when we take ownership.
         * This is the transitional state explained at the top of this file.
         */
        mark_rt_mutex_waiters(lock);

        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
                return 0;

        /* We got the lock. */
        debug_rt_mutex_lock(lock);

        rt_mutex_set_owner(lock, current, 0);

        rt_mutex_deadlock_account_lock(lock, current);

        return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   int detect_deadlock)
{
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
        unsigned long flags;
        int chain_walk = 0, res;

        spin_lock_irqsave(&current->pi_lock, flags);
        __rt_mutex_adjust_prio(current);
        waiter->task = current;
        waiter->lock = lock;
        plist_node_init(&waiter->list_entry, current->prio);
        plist_node_init(&waiter->pi_list_entry, current->prio);

        /* Get the top priority waiter on the lock */
        if (rt_mutex_has_waiters(lock))
                top_waiter = rt_mutex_top_waiter(lock);
        plist_add(&waiter->list_entry, &lock->wait_list);

        current->pi_blocked_on = waiter;

        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (waiter == rt_mutex_top_waiter(lock)) {
                spin_lock_irqsave(&owner->pi_lock, flags);
                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
                spin_unlock_irqrestore(&owner->pi_lock, flags);
        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
                chain_walk = 1;

        if (!chain_walk)
                return 0;

        /*
         * The owner can't disappear while holding a lock,
         * so the owner struct is protected by wait_lock.
         * The reference gets dropped in rt_mutex_adjust_prio_chain()!
         */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         current);

        spin_lock(&lock->wait_lock);

        return res;
}
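
/*
 * Note (an addition): a full chain walk is only started when the new
 * waiter became the top waiter of the lock and the owner itself is
 * blocked on another lock (or when deadlock detection asks for it).
 * In the common single-lock case the function returns after a single
 * priority adjustment of the owner.
 */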

/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * from the lock waiter list. Set it as pending owner. Then wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
        struct rt_mutex_waiter *waiter;
        struct task_struct *pendowner;
        unsigned long flags;

        spin_lock_irqsave(&current->pi_lock, flags);

        waiter = rt_mutex_top_waiter(lock);
        plist_del(&waiter->list_entry, &lock->wait_list);

        /*
         * Remove it from current->pi_waiters. We do not adjust a
         * possible priority boost right now. We execute wakeup in the
         * boosted mode and go back to normal after releasing
         * lock->wait_lock.
         */
        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
        pendowner = waiter->task;
        waiter->task = NULL;

        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);

        spin_unlock_irqrestore(&current->pi_lock, flags);

        /*
         * Clear the pi_blocked_on variable and enqueue a possible
         * waiter into the pi_waiters list of the pending owner. This
         * prevents that, in case the pending owner gets unboosted, a
         * waiter with higher priority than pending-owner->normal_prio
         * is blocked on the unboosted (pending) owner.
         */
        spin_lock_irqsave(&pendowner->pi_lock, flags);

        WARN_ON(!pendowner->pi_blocked_on);
        WARN_ON(pendowner->pi_blocked_on != waiter);
        WARN_ON(pendowner->pi_blocked_on->lock != lock);

        pendowner->pi_blocked_on = NULL;

        if (rt_mutex_has_waiters(lock)) {
                struct rt_mutex_waiter *next;

                next = rt_mutex_top_waiter(lock);
                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
        }
        spin_unlock_irqrestore(&pendowner->pi_lock, flags);

        wake_up_process(pendowner);
}
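
/*
 * Note (an addition): the woken task does not own the lock yet; it is
 * only the pending owner (bit 0 set in lock->owner). It still has to
 * win try_to_take_rt_mutex() in the slowlock loop, where a higher
 * priority task may yet steal the lock via try_to_steal_lock().
 */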

/*
 * Remove a waiter from a lock
 *
 * Must be called with lock->wait_lock held
 */
static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
{
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        unsigned long flags;
        int chain_walk = 0;

        spin_lock_irqsave(&current->pi_lock, flags);
        plist_del(&waiter->list_entry, &lock->wait_list);
        waiter->task = NULL;
        current->pi_blocked_on = NULL;
        spin_unlock_irqrestore(&current->pi_lock, flags);

        if (first && owner != current) {

                spin_lock_irqsave(&owner->pi_lock, flags);

                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

                if (rt_mutex_has_waiters(lock)) {
                        struct rt_mutex_waiter *next;

                        next = rt_mutex_top_waiter(lock);
                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
                }
                __rt_mutex_adjust_prio(owner);

                if (owner->pi_blocked_on)
                        chain_walk = 1;

                spin_unlock_irqrestore(&owner->pi_lock, flags);
        }

        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

        if (!chain_walk)
                return;

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);

        spin_unlock(&lock->wait_lock);

        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

        spin_lock(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
        struct rt_mutex_waiter *waiter;
        unsigned long flags;

        spin_lock_irqsave(&task->pi_lock, flags);

        waiter = task->pi_blocked_on;
        if (!waiter || waiter->list_entry.prio == task->prio) {
                spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }

        spin_unlock_irqrestore(&task->pi_lock, flags);

        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
                  int detect_deadlock)
{
        struct rt_mutex_waiter waiter;
        int ret = 0;

        debug_rt_mutex_init_waiter(&waiter);
        waiter.task = NULL;

        spin_lock(&lock->wait_lock);

        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock)) {
                spin_unlock(&lock->wait_lock);
                return 0;
        }

        set_current_state(state);

        /* Set up the timer when timeout != NULL */
        if (unlikely(timeout))
                hrtimer_start(&timeout->timer, timeout->timer.expires,
                              HRTIMER_MODE_ABS);

        for (;;) {
                /* Try to acquire the lock: */
                if (try_to_take_rt_mutex(lock))
                        break;

                /*
                 * TASK_INTERRUPTIBLE checks for signals and
                 * timeout. Ignored otherwise.
                 */
                if (unlikely(state == TASK_INTERRUPTIBLE)) {
                        /* Signal pending? */
                        if (signal_pending(current))
                                ret = -EINTR;
                        if (timeout && !timeout->task)
                                ret = -ETIMEDOUT;
                        if (ret)
                                break;
                }

                /*
                 * waiter.task is NULL the first time we come here and
                 * when we have been woken up by the previous owner
                 * but the lock got stolen by a higher prio task.
                 */
                if (!waiter.task) {
                        ret = task_blocks_on_rt_mutex(lock, &waiter,
                                                      detect_deadlock);
                        /*
                         * If we got woken up by the owner then start the
                         * loop all over without going into schedule to try
                         * to get the lock now:
                         */
                        if (unlikely(!waiter.task)) {
                                /*
                                 * Reset the return value. We might
                                 * have returned with -EDEADLK and the
                                 * owner released the lock while we
                                 * were walking the pi chain.
                                 */
                                ret = 0;
                                continue;
                        }
                        if (unlikely(ret))
                                break;
                }

                spin_unlock(&lock->wait_lock);

                debug_rt_mutex_print_deadlock(&waiter);

                if (waiter.task)
                        schedule_rt_mutex(lock);

                spin_lock(&lock->wait_lock);
                set_current_state(state);
        }

        set_current_state(TASK_RUNNING);

        if (unlikely(waiter.task))
                remove_waiter(lock, &waiter);

        /*
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
        fixup_rt_mutex_waiters(lock);

        spin_unlock(&lock->wait_lock);

        /* Remove pending timer: */
        if (unlikely(timeout))
                hrtimer_cancel(&timeout->timer);

        /*
         * Readjust priority when we did not get the lock. We might
         * have been the pending owner and boosted. Since we did not
         * take the lock, the PI boost has to go.
         */
        if (unlikely(ret))
                rt_mutex_adjust_prio(current);

        debug_rt_mutex_free_waiter(&waiter);

        return ret;
}
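
/*
 * Control flow summary (an addition): the slow path loops over
 *
 *	try_to_take_rt_mutex()  ->  success: done
 *	(re)enqueue as waiter via task_blocks_on_rt_mutex()
 *	schedule_rt_mutex()     ->  sleep until woken
 *
 * where waiter.task == NULL after wakeup means we became the pending
 * owner (or the lock was stolen again) and must retry the acquisition
 * without sleeping.
 */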

/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
        int ret = 0;

        spin_lock(&lock->wait_lock);

        if (likely(rt_mutex_owner(lock) != current)) {

                ret = try_to_take_rt_mutex(lock);
                /*
                 * try_to_take_rt_mutex() sets the lock waiters
                 * bit unconditionally. Clean this up.
                 */
                fixup_rt_mutex_waiters(lock);
        }

        spin_unlock(&lock->wait_lock);

        return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
        spin_lock(&lock->wait_lock);

        debug_rt_mutex_unlock(lock);

        rt_mutex_deadlock_account_unlock(current);

        if (!rt_mutex_has_waiters(lock)) {
                lock->owner = NULL;
                spin_unlock(&lock->wait_lock);
                return;
        }

        wakeup_next_waiter(lock);

        spin_unlock(&lock->wait_lock);

        /* Undo pi boosting if necessary: */
        rt_mutex_adjust_prio(current);
}

/*
 * debug aware fast / slowpath lock, trylock, unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
                  int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
                                int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
                        struct hrtimer_sleeper *timeout, int detect_deadlock,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
                                      int detect_deadlock))
{
        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
                return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
                     int (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 1;
        }
        return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
                    void (*slowfn)(struct rt_mutex *lock))
{
        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
                rt_mutex_deadlock_account_unlock(current);
        else
                slowfn(lock);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
        might_sleep();

        rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:               the rt_mutex to be locked
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
                                        int detect_deadlock)
{
        might_sleep();

        return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
                                 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *                       structure provided by the caller
 *
 * @lock:               the rt_mutex to be locked
 * @timeout:            timeout structure or NULL (no timeout)
 * @detect_deadlock:    deadlock detection on/off
 *
 * Returns:
 *  0           on success
 * -EINTR       when interrupted by a signal
 * -ETIMEDOUT   when the timeout expired
 * -EDEADLK     when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
                    int detect_deadlock)
{
        might_sleep();

        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
                                       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
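
/*
 * Usage sketch (an addition, modelled on the PI-futex caller; verify
 * against the hrtimer API in this tree before relying on it): the
 * caller initializes the sleeper with an absolute expiry time.
 *
 *	struct hrtimer_sleeper to;
 *	int ret;
 *
 *	hrtimer_init(&to.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&to, current);
 *	to.timer.expires = ktime_add_ns(ktime_get(), NSEC_PER_SEC);
 *
 *	ret = rt_mutex_timed_lock(&my_mutex, &to, 0);
 *	(ret is 0, -EINTR or -ETIMEDOUT; my_mutex is a hypothetical name)
 */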

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:       the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
        rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
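
/*
 * Usage sketch (an addition): typical kernel-side use of the exported
 * API; DEFINE_RT_MUTEX comes from <linux/rtmutex.h> and my_mutex is a
 * hypothetical example name.
 *
 *	static DEFINE_RT_MUTEX(my_mutex);
 *
 *	rt_mutex_lock(&my_mutex);
 *	... critical section, owner inherits waiter priorities ...
 *	rt_mutex_unlock(&my_mutex);
 *
 *	if (rt_mutex_trylock(&my_mutex)) {
 *		... got it without blocking ...
 *		rt_mutex_unlock(&my_mutex);
 *	}
 */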

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
        WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
        lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to the unlocked state.
 *
 * Initializing a locked rt lock is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        lock->owner = NULL;
        spin_lock_init(&lock->wait_lock);
        plist_head_init(&lock->wait_list, &lock->wait_lock);

        debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
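
/*
 * Note (an addition): most callers do not call __rt_mutex_init()
 * directly; they use the rt_mutex_init() wrapper macro from
 * <linux/rtmutex.h>, which passes the lock's name for the debug code,
 * or the static DEFINE_RT_MUTEX() initializer shown above.
 */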

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *                              proxy owner
 *
 * @lock:        the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                struct task_struct *proxy_owner)
{
        __rt_mutex_init(lock, NULL);
        debug_rt_mutex_proxy_lock(lock, proxy_owner);
        rt_mutex_set_owner(lock, proxy_owner, 0);
        rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:        the rt_mutex to be unlocked
 * @proxy_owner: the task that was the proxy owner
 *
 * No locking. The caller has to do the serializing itself.
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                           struct task_struct *proxy_owner)
{
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL, 0);
        rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
        if (!rt_mutex_has_waiters(lock))
                return NULL;

        return rt_mutex_top_waiter(lock)->task;
}