linux-2.6: kernel/rtmutex-debug.c
/*
 * RT-Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This code is based on the rt.c implementation in the preempt-rt tree.
 * Portions of said code are
 *
 *  Copyright (C) 2004  LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Copyright (C) 2006  Esben Nielsen
 *  Copyright (C) 2006  Kihon Technologies Inc.,
 *                      Steven Rostedt <rostedt@goodmis.org>
 *
 * See rt.c in preempt-rt for proper credits and further information
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/plist.h>
#include <linux/fs.h>

#include "rtmutex_common.h"

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

# define TRACE_WARN_ON(x)                       WARN_ON(x)
# define TRACE_BUG_ON(x)                        BUG_ON(x)

# define TRACE_OFF()                                            \
do {                                                            \
        if (rt_trace_on) {                                      \
                rt_trace_on = 0;                                \
                console_verbose();                              \
                if (spin_is_locked(&current->pi_lock))          \
                        spin_unlock(&current->pi_lock);         \
                if (spin_is_locked(&current->held_list_lock))   \
                        spin_unlock(&current->held_list_lock);  \
        }                                                       \
} while (0)

# define TRACE_OFF_NOLOCK()                                     \
do {                                                            \
        if (rt_trace_on) {                                      \
                rt_trace_on = 0;                                \
                console_verbose();                              \
        }                                                       \
} while (0)

# define TRACE_BUG_LOCKED()                     \
do {                                            \
        TRACE_OFF();                            \
        BUG();                                  \
} while (0)

# define TRACE_WARN_ON_LOCKED(c)                \
do {                                            \
        if (unlikely(c)) {                      \
                TRACE_OFF();                    \
                WARN_ON(1);                     \
        }                                       \
} while (0)

# define TRACE_BUG_ON_LOCKED(c)                 \
do {                                            \
        if (unlikely(c))                        \
                TRACE_BUG_LOCKED();             \
} while (0)

#ifdef CONFIG_SMP
# define SMP_TRACE_BUG_ON_LOCKED(c)     TRACE_BUG_ON_LOCKED(c)
#else
# define SMP_TRACE_BUG_ON_LOCKED(c)     do { } while (0)
#endif

/*
 * Deadlock detection flag. We turn it off when we detect
 * the first problem, because we don't want to recurse back
 * into the tracing code when doing an error printk or
 * executing a BUG():
 */
int rt_trace_on = 1;

void deadlock_trace_off(void)
{
        rt_trace_on = 0;
}
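
/*
 * Editorial illustration (not part of the original file): the debug
 * helpers in this file follow a common gating pattern - bail out early
 * while rt_trace_on is clear, and call TRACE_OFF() before reporting so
 * that the report itself cannot recurse back into these checks. A
 * minimal sketch, with check_some_invariant() as a purely hypothetical
 * predicate:
 *
 *      static void debug_check_example(struct rt_mutex *lock)
 *      {
 *              if (!rt_trace_on)
 *                      return;
 *              if (unlikely(!check_some_invariant(lock))) {
 *                      TRACE_OFF();
 *                      printk("BUG: rt_mutex invariant violated at %p\n", lock);
 *              }
 *      }
 */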

static void printk_task(task_t *p)
{
        if (p)
                printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
        else
                printk("<none>");
}

static void printk_task_short(task_t *p)
{
        if (p)
                printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
        else
                printk("<none>");
}

static void printk_lock(struct rt_mutex *lock, int print_owner)
{
        if (lock->name)
                printk(" [%p] {%s}\n",
                        lock, lock->name);
        else
                printk(" [%p] {%s:%d}\n",
                        lock, lock->file, lock->line);

        if (print_owner && rt_mutex_owner(lock)) {
                printk(".. ->owner: %p\n", lock->owner);
                printk(".. held by:  ");
                printk_task(rt_mutex_owner(lock));
                printk("\n");
        }
        if (rt_mutex_owner(lock)) {
                printk("... acquired at:               ");
                print_symbol("%s\n", lock->acquire_ip);
        }
}

static void printk_waiter(struct rt_mutex_waiter *w)
{
        printk("-------------------------\n");
        printk("| waiter struct %p:\n", w);
        printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
               w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
               w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
               w->list_entry.prio);
        printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
               w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
               w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
               w->pi_list_entry.prio);
        printk("\n| lock:\n");
        printk_lock(w->lock, 1);
        printk("| w->ti->task:\n");
        printk_task(w->task);
        printk("| blocked at:  ");
        print_symbol("%s\n", w->ip);
        printk("-------------------------\n");
}

static void show_task_locks(task_t *p)
{
        switch (p->state) {
        case TASK_RUNNING:              printk("R"); break;
        case TASK_INTERRUPTIBLE:        printk("S"); break;
        case TASK_UNINTERRUPTIBLE:      printk("D"); break;
        case TASK_STOPPED:              printk("T"); break;
        case EXIT_ZOMBIE:               printk("Z"); break;
        case EXIT_DEAD:                 printk("X"); break;
        default:                        printk("?"); break;
        }
        printk_task(p);
        if (p->pi_blocked_on) {
                struct rt_mutex *lock = p->pi_blocked_on->lock;

                printk(" blocked on:");
                printk_lock(lock, 1);
        } else
                printk(" (not blocked)\n");
}

void rt_mutex_show_held_locks(task_t *task, int verbose)
{
        struct list_head *curr, *cursor = NULL;
        struct rt_mutex *lock;
        task_t *t;
        unsigned long flags;
        int count = 0;

        if (!rt_trace_on)
                return;

        if (verbose) {
                printk("------------------------------\n");
                printk("| showing all locks held by: |  (");
                printk_task_short(task);
                printk("):\n");
                printk("------------------------------\n");
        }

next:
        spin_lock_irqsave(&task->held_list_lock, flags);
        list_for_each(curr, &task->held_list_head) {
                if (cursor && curr != cursor)
                        continue;
                lock = list_entry(curr, struct rt_mutex, held_list_entry);
                t = rt_mutex_owner(lock);
                WARN_ON(t != task);
                count++;
                cursor = curr->next;
                spin_unlock_irqrestore(&task->held_list_lock, flags);

                printk("\n#%03d:            ", count);
                printk_lock(lock, 0);
                goto next;
        }
        spin_unlock_irqrestore(&task->held_list_lock, flags);

        printk("\n");
}

void rt_mutex_show_all_locks(void)
{
        task_t *g, *p;
        int count = 10;
        int unlock = 1;

        printk("\n");
        printk("----------------------\n");
        printk("| showing all tasks: |\n");
        printk("----------------------\n");

        /*
         * Try hard to get the tasklist_lock: if we cannot take it within
         * 2 seconds we carry on without it (but keep retrying later), so
         * that a debug printout is possible even if a tasklist_lock-holding
         * task has deadlocked or crashed. A condensed sketch of this retry
         * pattern follows the function.
         */
retry:
        if (!read_trylock(&tasklist_lock)) {
                if (count == 10)
                        printk("hm, tasklist_lock locked, retrying... ");
                if (count) {
                        count--;
                        printk(" #%d", 10-count);
                        mdelay(200);
                        goto retry;
                }
                printk(" ignoring it.\n");
                unlock = 0;
        }
        if (count != 10)
                printk(" locked it.\n");

        do_each_thread(g, p) {
                show_task_locks(p);
                if (!unlock)
                        if (read_trylock(&tasklist_lock))
                                unlock = 1;
        } while_each_thread(g, p);

        printk("\n");

        printk("-----------------------------------------\n");
        printk("| showing all locks held in the system: |\n");
        printk("-----------------------------------------\n");

        do_each_thread(g, p) {
                rt_mutex_show_held_locks(p, 0);
                if (!unlock)
                        if (read_trylock(&tasklist_lock))
                                unlock = 1;
        } while_each_thread(g, p);

        printk("=============================================\n\n");

        if (unlock)
                read_unlock(&tasklist_lock);
}
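
/*
 * Editorial sketch (not part of the original file) of the bounded
 * trylock/retry pattern used by rt_mutex_show_all_locks() above:
 * up to 10 attempts, 200ms apart (~2 seconds total), after which the
 * printout proceeds without the lock and simply skips the final unlock:
 *
 *      int tries = 10, unlock = 1;
 *
 *      while (!read_trylock(&tasklist_lock)) {
 *              if (!tries--) {
 *                      unlock = 0;             // give up, print anyway
 *                      break;
 *              }
 *              mdelay(200);                    // 10 * 200ms == ~2s budget
 *      }
 *      // ... walk the task list, printing ...
 *      if (unlock)
 *              read_unlock(&tasklist_lock);
 */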

void rt_mutex_debug_check_no_locks_held(task_t *task)
{
        struct rt_mutex_waiter *w;
        struct list_head *curr;
        struct rt_mutex *lock;

        if (!rt_trace_on)
                return;
        if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
                printk("BUG: PI priority boost leaked!\n");
                printk_task(task);
                printk("\n");
        }
        if (list_empty(&task->held_list_head))
                return;

        spin_lock(&task->pi_lock);
        plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
                TRACE_OFF();

                printk("hm, PI interest held at exit time? Task:\n");
                printk_task(task);
                printk_waiter(w);
                return;
        }
        spin_unlock(&task->pi_lock);

        list_for_each(curr, &task->held_list_head) {
                lock = list_entry(curr, struct rt_mutex, held_list_entry);

                printk("BUG: %s/%d, lock held at task exit time!\n",
                       task->comm, task->pid);
                printk_lock(lock, 1);
                if (rt_mutex_owner(lock) != task)
                        printk("exiting task is not even the owner??\n");
        }
}

int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
{
        const void *to = from + len;
        struct list_head *curr;
        struct rt_mutex *lock;
        unsigned long flags;
        void *lock_addr;

        if (!rt_trace_on)
                return 0;

        spin_lock_irqsave(&current->held_list_lock, flags);
        list_for_each(curr, &current->held_list_head) {
                lock = list_entry(curr, struct rt_mutex, held_list_entry);
                lock_addr = lock;
                if (lock_addr < from || lock_addr >= to)
                        continue;
                TRACE_OFF();

                printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
                        current->comm, current->pid, lock, from, to);
                dump_stack();
                printk_lock(lock, 1);
                if (rt_mutex_owner(lock) != current)
                        printk("freeing task is not even the owner??\n");
                return 1;
        }
        spin_unlock_irqrestore(&current->held_list_lock, flags);

        return 0;
}

void rt_mutex_debug_task_free(struct task_struct *task)
{
        WARN_ON(!plist_head_empty(&task->pi_waiters));
        WARN_ON(task->pi_blocked_on);
}

/*
 * Fill out the waiter's fields to record information about the deadlock;
 * the report is printed later, on return from the lock operation (see
 * debug_rt_mutex_print_deadlock()). act_waiter can be NULL in the case
 * of a remove-waiter operation.
 */
void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
                             struct rt_mutex *lock)
{
        struct task_struct *task;

        if (!rt_trace_on || detect || !act_waiter)
                return;

        task = rt_mutex_owner(act_waiter->lock);
        if (task && task != current) {
                act_waiter->deadlock_task_pid = task->pid;
                act_waiter->deadlock_lock = lock;
        }
}
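
/*
 * Editorial sketch (not part of the original file) of how the record and
 * print halves pair up in the rt_mutex slowpath; the caller names are
 * from rtmutex.c and are quoted only as context:
 *
 *      // while enqueueing the waiter (task_blocks_on_rt_mutex()),
 *      // record the suspected deadlock - only taken when the caller's
 *      // detect_deadlock flag is not set:
 *      debug_rt_mutex_deadlock(detect_deadlock, waiter, lock);
 *      ...
 *      // once the blocking attempt returns (rt_mutex_slowlock()),
 *      // emit the report:
 *      debug_rt_mutex_print_deadlock(waiter);
 */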

void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
        struct task_struct *task;

        if (!waiter->deadlock_lock || !rt_trace_on)
                return;

        task = find_task_by_pid(waiter->deadlock_task_pid);
        if (!task)
                return;

        TRACE_OFF_NOLOCK();

        printk("\n============================================\n");
        printk(  "[ BUG: circular locking deadlock detected! ]\n");
        printk(  "--------------------------------------------\n");
        printk("%s/%d is deadlocking current task %s/%d\n\n",
               task->comm, task->pid, current->comm, current->pid);

        printk("\n1) %s/%d is trying to acquire this lock:\n",
               current->comm, current->pid);
        printk_lock(waiter->lock, 1);

        printk("... trying at:                 ");
        print_symbol("%s\n", waiter->ip);

        printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
        printk_lock(waiter->deadlock_lock, 1);

        rt_mutex_show_held_locks(current, 1);
        rt_mutex_show_held_locks(task, 1);

        printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
        show_stack(task, NULL);
        printk("\n%s/%d's [current] stackdump:\n\n",
               current->comm, current->pid);
        dump_stack();
        rt_mutex_show_all_locks();
        printk("[ turning off deadlock detection. "
               "Please report this trace. ]\n\n");
        local_irq_disable();
}

void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
{
        unsigned long flags;

        if (rt_trace_on) {
                TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));

                spin_lock_irqsave(&current->held_list_lock, flags);
                list_add_tail(&lock->held_list_entry, &current->held_list_head);
                spin_unlock_irqrestore(&current->held_list_lock, flags);

                lock->acquire_ip = ip;
        }
}

void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
        unsigned long flags;

        if (rt_trace_on) {
                TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
                TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));

                spin_lock_irqsave(&current->held_list_lock, flags);
                list_del_init(&lock->held_list_entry);
                spin_unlock_irqrestore(&current->held_list_lock, flags);
        }
}

void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
                               struct task_struct *powner __IP_DECL__)
{
        unsigned long flags;

        if (rt_trace_on) {
                TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));

                spin_lock_irqsave(&powner->held_list_lock, flags);
                list_add_tail(&lock->held_list_entry, &powner->held_list_head);
                spin_unlock_irqrestore(&powner->held_list_lock, flags);

                lock->acquire_ip = ip;
        }
}

void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
        unsigned long flags;

        if (rt_trace_on) {
                struct task_struct *owner = rt_mutex_owner(lock);

                TRACE_WARN_ON_LOCKED(!owner);
                TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));

                spin_lock_irqsave(&owner->held_list_lock, flags);
                list_del_init(&lock->held_list_entry);
                spin_unlock_irqrestore(&owner->held_list_lock, flags);
        }
}

void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
        memset(waiter, 0x11, sizeof(*waiter));
        plist_node_init(&waiter->list_entry, MAX_PRIO);
        plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
}

void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
        TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
        TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
        TRACE_WARN_ON(waiter->task);
        memset(waiter, 0x22, sizeof(*waiter));
}

void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
{
        void *addr = lock;

        if (rt_trace_on) {
                rt_mutex_debug_check_no_locks_freed(addr,
                                                    sizeof(struct rt_mutex));
                INIT_LIST_HEAD(&lock->held_list_entry);
                lock->name = name;
        }
}

void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
{
}

void rt_mutex_deadlock_account_unlock(struct task_struct *task)
{
}