/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"
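
/*
 * Three /proc interfaces are implemented below:
 *
 *   /proc/lockdep        - one entry per lock class, plus its direct
 *                          forward dependencies
 *   /proc/lockdep_stats  - global lockdep counters and limits
 *   /proc/lock_stat      - contention statistics (CONFIG_LOCK_STAT only)
 *
 * The l_*() seq_file iterators walk the global all_lock_classes list
 * maintained by the lockdep core; m->private caches the lock_class that
 * is printed next.
 */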
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_class *class = v;

        (*pos)++;

        if (class->lock_entry.next != &all_lock_classes)
                class = list_entry(class->lock_entry.next, struct lock_class,
                                   lock_entry);
        else
                class = NULL;
        m->private = class;

        return class;
}

static void *l_start(struct seq_file *m, loff_t *pos)
{
        struct lock_class *class = m->private;

        if (&class->lock_entry == all_lock_classes.next)
                seq_printf(m, "all lock classes:\n");

        return class;
}

static void l_stop(struct seq_file *m, void *v)
{
}
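
/*
 * count_forward_deps()/count_backward_deps() recurse the locks_after and
 * locks_before lists rooted at the given class.  A class reachable via more
 * than one path is counted once per path, so the result is really the number
 * of dependency paths, an upper bound on the number of distinct classes.
 */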
static unsigned long count_forward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_after, entry)
                ret += count_forward_deps(entry->class);

        return ret;
}

static unsigned long count_backward_deps(struct lock_class *class)
{
        struct lock_list *entry;
        unsigned long ret = 1;

        /*
         * Recurse this class's dependency list:
         */
        list_for_each_entry(entry, &class->locks_before, entry)
                ret += count_backward_deps(entry->class);

        return ret;
}
static void print_name(struct seq_file *m, struct lock_class *class)
{
        char str[KSYM_NAME_LEN];
        const char *name = class->name;

        if (!name) {
                name = __get_key_name(class->key, str);
                seq_printf(m, "%s", name);
        } else {
                seq_printf(m, "%s", name);
                if (class->name_version > 1)
                        seq_printf(m, "#%d", class->name_version);
                if (class->subclass)
                        seq_printf(m, "/%d", class->subclass);
        }
}
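
/*
 * l_show() emits one line per lock class:
 *
 *   <key> [OPS:<ops>] FD:<forward deps> BD:<backward deps> <usage chars>: <name>
 *
 * followed by one " -> [<key>] <name>" line for each direct (distance == 1)
 * forward dependency.  The four usage characters come from get_usage_chars()
 * and encode the hardirq/softirq (and read-mode) usage state of the class.
 */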
static int l_show(struct seq_file *m, void *v)
{
        unsigned long nr_forward_deps, nr_backward_deps;
        struct lock_class *class = m->private;
        struct lock_list *entry;
        char c1, c2, c3, c4;

        seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
        seq_printf(m, " OPS:%8ld", class->ops);
#endif
        nr_forward_deps = count_forward_deps(class);
        seq_printf(m, " FD:%5ld", nr_forward_deps);

        nr_backward_deps = count_backward_deps(class);
        seq_printf(m, " BD:%5ld", nr_backward_deps);

        get_usage_chars(class, &c1, &c2, &c3, &c4);
        seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);

        seq_printf(m, ": ");
        print_name(m, class);
        seq_puts(m, "\n");

        list_for_each_entry(entry, &class->locks_after, entry) {
                if (entry->distance == 1) {
                        seq_printf(m, " -> [%p] ", entry->class);
                        print_name(m, entry->class);
                        seq_puts(m, "\n");
                }
        }
        seq_puts(m, "\n");

        return 0;
}
static const struct seq_operations lockdep_ops = {
        .start  = l_start,
        .next   = l_next,
        .stop   = l_stop,
        .show   = l_show,
};
static int lockdep_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &lockdep_ops);
        if (!res) {
                struct seq_file *m = file->private_data;

                if (!list_empty(&all_lock_classes))
                        m->private = list_entry(all_lock_classes.next,
                                        struct lock_class, lock_entry);
                else
                        m->private = NULL;
        }
        return res;
}
static const struct file_operations proc_lockdep_operations = {
        .open           = lockdep_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
        unsigned int hi1 = debug_atomic_read(&hardirqs_on_events),
                     hi2 = debug_atomic_read(&hardirqs_off_events),
                     hr1 = debug_atomic_read(&redundant_hardirqs_on),
                     hr2 = debug_atomic_read(&redundant_hardirqs_off),
                     si1 = debug_atomic_read(&softirqs_on_events),
                     si2 = debug_atomic_read(&softirqs_off_events),
                     sr1 = debug_atomic_read(&redundant_softirqs_on),
                     sr2 = debug_atomic_read(&redundant_softirqs_off);

        seq_printf(m, " chain lookup misses:           %11u\n",
                debug_atomic_read(&chain_lookup_misses));
        seq_printf(m, " chain lookup hits:             %11u\n",
                debug_atomic_read(&chain_lookup_hits));
        seq_printf(m, " cyclic checks:                 %11u\n",
                debug_atomic_read(&nr_cyclic_checks));
        seq_printf(m, " cyclic-check recursions:       %11u\n",
                debug_atomic_read(&nr_cyclic_check_recursions));
        seq_printf(m, " find-mask forwards checks:     %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_checks));
        seq_printf(m, " find-mask forwards recursions: %11u\n",
                debug_atomic_read(&nr_find_usage_forwards_recursions));
        seq_printf(m, " find-mask backwards checks:    %11u\n",
                debug_atomic_read(&nr_find_usage_backwards_checks));
        seq_printf(m, " find-mask backwards recursions:%11u\n",
                debug_atomic_read(&nr_find_usage_backwards_recursions));

        seq_printf(m, " hardirq on events:             %11u\n", hi1);
        seq_printf(m, " hardirq off events:            %11u\n", hi2);
        seq_printf(m, " redundant hardirq ons:         %11u\n", hr1);
        seq_printf(m, " redundant hardirq offs:        %11u\n", hr2);
        seq_printf(m, " softirq on events:             %11u\n", si1);
        seq_printf(m, " softirq off events:            %11u\n", si2);
        seq_printf(m, " redundant softirq ons:         %11u\n", sr1);
        seq_printf(m, " redundant softirq offs:        %11u\n", sr2);
#endif
}
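
/*
 * lockdep_stats_show() classifies every known lock class by its usage_mask
 * bits.  The LOCKF_* bits are independent, so one class can contribute to
 * several counters at once (for example both hardirq-safe and softirq-unsafe);
 * the categories below are therefore not mutually exclusive.
 */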
static int lockdep_stats_show(struct seq_file *m, void *v)
{
        struct lock_class *class;
        unsigned long nr_unused = 0, nr_uncategorized = 0,
                      nr_irq_safe = 0, nr_irq_unsafe = 0,
                      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
                      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
                      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
                      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
                      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
                      sum_forward_deps = 0, factor = 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {

                if (class->usage_mask == 0)
                        nr_unused++;
                if (class->usage_mask == LOCKF_USED)
                        nr_uncategorized++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ)
                        nr_irq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS)
                        nr_irq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
                        nr_softirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
                        nr_softirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
                        nr_hardirq_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
                        nr_hardirq_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
                        nr_irq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
                        nr_irq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
                        nr_softirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
                        nr_softirq_read_unsafe++;
                if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
                        nr_hardirq_read_safe++;
                if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                        nr_hardirq_read_unsafe++;

                sum_forward_deps += count_forward_deps(class);
        }
#ifdef CONFIG_DEBUG_LOCKDEP
        DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
#endif
        seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
                        nr_lock_classes, MAX_LOCKDEP_KEYS);
        seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n",
                        nr_list_entries, MAX_LOCKDEP_ENTRIES);
        seq_printf(m, " indirect dependencies:         %11lu\n",
                        sum_forward_deps);

        /*
         * Total number of dependencies:
         *
         * All irq-safe locks may nest inside irq-unsafe locks,
         * plus all the other known dependencies:
         */
        seq_printf(m, " all direct dependencies:       %11lu\n",
                        nr_irq_unsafe * nr_irq_safe +
                        nr_hardirq_unsafe * nr_hardirq_safe +
                        nr_list_entries);

        /*
         * Estimated factor between direct and indirect
         * dependencies:
         */
        if (nr_list_entries)
                factor = sum_forward_deps / nr_list_entries;
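
        /*
         * Note: "factor" estimates how many indirect dependencies each
         * recorded direct dependency expands into on average, i.e. the
         * ratio of the summed forward-dependency counts to the number of
         * lock_list entries.
         */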
#ifdef CONFIG_PROVE_LOCKING
        seq_printf(m, " dependency chains:             %11lu [max: %lu]\n",
                        nr_lock_chains, MAX_LOCKDEP_CHAINS);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        seq_printf(m, " in-hardirq chains:             %11u\n",
                        nr_hardirq_chains);
        seq_printf(m, " in-softirq chains:             %11u\n",
                        nr_softirq_chains);
#endif
        seq_printf(m, " in-process chains:             %11u\n",
                        nr_process_chains);
        seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
                        nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
        seq_printf(m, " combined max dependencies:     %11u\n",
                        (nr_hardirq_chains + 1) *
                        (nr_softirq_chains + 1) *
                        (nr_process_chains + 1)
        );
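
        /*
         * The per-state lock counts below come straight from the usage_mask
         * scan above: "-safe" means the lock has been taken in that context,
         * "-unsafe" means it has been held with that context enabled, and
         * the "-read" variants track read-side (shared) acquisitions.
         */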
        seq_printf(m, " hardirq-safe locks:            %11lu\n",
                        nr_hardirq_safe);
        seq_printf(m, " hardirq-unsafe locks:          %11lu\n",
                        nr_hardirq_unsafe);
        seq_printf(m, " softirq-safe locks:            %11lu\n",
                        nr_softirq_safe);
        seq_printf(m, " softirq-unsafe locks:          %11lu\n",
                        nr_softirq_unsafe);
        seq_printf(m, " irq-safe locks:                %11lu\n",
                        nr_irq_safe);
        seq_printf(m, " irq-unsafe locks:              %11lu\n",
                        nr_irq_unsafe);

        seq_printf(m, " hardirq-read-safe locks:       %11lu\n",
                        nr_hardirq_read_safe);
        seq_printf(m, " hardirq-read-unsafe locks:     %11lu\n",
                        nr_hardirq_read_unsafe);
        seq_printf(m, " softirq-read-safe locks:       %11lu\n",
                        nr_softirq_read_safe);
        seq_printf(m, " softirq-read-unsafe locks:     %11lu\n",
                        nr_softirq_read_unsafe);
        seq_printf(m, " irq-read-safe locks:           %11lu\n",
                        nr_irq_read_safe);
        seq_printf(m, " irq-read-unsafe locks:         %11lu\n",
                        nr_irq_read_unsafe);

        seq_printf(m, " uncategorized locks:           %11lu\n",
                        nr_uncategorized);
        seq_printf(m, " unused locks:                  %11lu\n",
                        nr_unused);
        seq_printf(m, " max locking depth:             %11u\n",
                        max_lockdep_depth);
        seq_printf(m, " max recursion depth:           %11u\n",
                        max_recursion_depth);
        lockdep_stats_debug_show(m);
        seq_printf(m, " debug_locks:                   %11u\n",
                        debug_locks);

        return 0;
}
static int lockdep_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, lockdep_stats_show, NULL);
}

static const struct file_operations proc_lockdep_stats_operations = {
        .open           = lockdep_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
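
/*
 * Everything from here to the matching #endif implements /proc/lock_stat
 * and is only compiled in when CONFIG_LOCK_STAT is enabled.
 */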
#ifdef CONFIG_LOCK_STAT

struct lock_stat_data {
        struct lock_class *class;
        struct lock_class_stats stats;
};

struct lock_stat_seq {
        struct lock_stat_data *iter;
        struct lock_stat_data *iter_end;
        struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
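
/*
 * A lock_stat_seq is a private, per-open snapshot of the statistics: the
 * stats[] array is sized for every possible class (MAX_LOCKDEP_KEYS), which
 * is why lock_stat_open() allocates it with vmalloc() rather than kmalloc()
 * or the stack.
 */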
/*
 * sort on absolute number of contentions
 */
static int lock_stat_cmp(const void *l, const void *r)
{
        const struct lock_stat_data *dl = l, *dr = r;
        unsigned long nl, nr;

        nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
        nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;

        return nr - nl;
}
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
        int i;

        for (i = 0; i < offset; i++)
                seq_puts(m, " ");
        for (i = 0; i < length; i++)
                seq_printf(m, "%c", c);
        seq_puts(m, "\n");
}
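
/*
 * The wait/hold times are accumulated by the lock-stat core in nanoseconds
 * (sched_clock() deltas), so the divide-by-1000 below presents them as
 * microseconds with two decimal places.  do_div() only handles unsigned
 * dividends, hence the XXX note about a signed variant.
 */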
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
        unsigned long rem;

        rem = do_div(nr, 1000); /* XXX: do_div_signed */
        snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10);
}

static void seq_time(struct seq_file *m, s64 time)
{
        char num[15];

        snprint_time(num, sizeof(num), time);
        seq_printf(m, " %14s", num);
}
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
        seq_printf(m, "%14lu", lt->nr);
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
}
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
        char name[39];
        struct lock_class *class;
        struct lock_class_stats *stats;
        int i, namelen;

        class = data->class;
        stats = &data->stats;

        namelen = 38;
        if (class->name_version > 1)
                namelen -= 2; /* XXX truncates versions > 9 */
        if (class->subclass)
                namelen -= 2;

        if (!class->name) {
                char str[KSYM_NAME_LEN];
                const char *key_name;

                key_name = __get_key_name(class->key, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
                snprintf(name, namelen, "%s", class->name);
        }
        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
                namelen += 2;
        }
        if (class->subclass) {
                snprintf(name+namelen, 3, "/%d", class->subclass);
                namelen += 2;
        }

        if (stats->write_holdtime.nr) {
                if (stats->read_holdtime.nr)
                        seq_printf(m, "%38s-W:", name);
                else
                        seq_printf(m, "%40s:", name);

                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
                seq_lock_time(m, &stats->write_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
                seq_lock_time(m, &stats->write_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_holdtime.nr) {
                seq_printf(m, "%38s-R:", name);
                seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
                seq_lock_time(m, &stats->read_waittime);
                seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
                seq_lock_time(m, &stats->read_holdtime);
                seq_puts(m, "\n");
        }

        if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
                return;

        if (stats->read_holdtime.nr)
                namelen += 2;

        for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
                char sym[KSYM_SYMBOL_LEN];
                char ip[32];

                if (class->contention_point[i] == 0)
                        break;

                if (!i)
                        seq_line(m, '-', 40-namelen, namelen);

                sprint_symbol(sym, class->contention_point[i]);
                snprintf(ip, sizeof(ip), "[<%p>]",
                                (void *)class->contention_point[i]);
                seq_printf(m, "%40s %14lu %29s %s\n", name,
                                stats->contention_point[i],
                                ip, sym);
        }
        if (i) {
                seq_puts(m, "\n");
                seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
                seq_puts(m, "\n");
        }
}
static void seq_header(struct seq_file *m)
{
        seq_printf(m, "lock_stat version 0.2\n");
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
                        "class name", "con-bounces", "contentions",
                        "waittime-min", "waittime-max", "waittime-total",
                        "acq-bounces", "acquisitions", "holdtime-min",
                        "holdtime-max", "holdtime-total");
        seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
        seq_printf(m, "\n");
}
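
/*
 * The resulting /proc/lock_stat layout is, roughly (field widths match the
 * 40 + 1 + 10 * (14 + 1) character rule drawn above):
 *
 *   class name  con-bounces  contentions  waittime-{min,max,total}
 *               acq-bounces  acquisitions holdtime-{min,max,total}
 *
 * with a -W (write) and optional -R (read) row per class that has been held,
 * followed by up to ARRAY_SIZE(class->contention_point) contention call sites.
 */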
static void *ls_start(struct seq_file *m, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;

        if (data->iter == data->stats)
                seq_header(m);

        if (data->iter == data->iter_end)
                data->iter = NULL;

        return data->iter;
}

static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct lock_stat_seq *data = m->private;

        (*pos)++;

        data->iter = v;
        data->iter++;
        if (data->iter == data->iter_end)
                data->iter = NULL;

        return data->iter;
}

static void ls_stop(struct seq_file *m, void *v)
{
}

static int ls_show(struct seq_file *m, void *v)
{
        struct lock_stat_seq *data = m->private;

        seq_stats(m, data->iter);
        return 0;
}
static struct seq_operations lockstat_ops = {
        .start  = ls_start,
        .next   = ls_next,
        .stop   = ls_stop,
        .show   = ls_show,
};
static int lock_stat_open(struct inode *inode, struct file *file)
{
        int res;
        struct lock_class *class;
        struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

        if (!data)
                return -ENOMEM;

        res = seq_open(file, &lockstat_ops);
        if (!res) {
                struct lock_stat_data *iter = data->stats;
                struct seq_file *m = file->private_data;

                data->iter = iter;
                list_for_each_entry(class, &all_lock_classes, lock_entry) {
                        iter->class = class;
                        iter->stats = lock_stats(class);
                        iter++;
                }
                data->iter_end = iter;

                sort(data->stats, data->iter_end - data->iter,
                                sizeof(struct lock_stat_data),
                                lock_stat_cmp, NULL);

                m->private = data;
        } else
                vfree(data);

        return res;
}
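
/*
 * Writing to /proc/lock_stat resets the statistics: only the first character
 * is examined and it must be '0', e.g.
 *
 *      echo 0 > /proc/lock_stat
 *
 * Any other value is accepted but ignored.
 */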
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        struct lock_class *class;
        char c;

        if (count) {
                if (get_user(c, buf))
                        return -EFAULT;

                if (c != '0')
                        return count;

                list_for_each_entry(class, &all_lock_classes, lock_entry)
                        clear_lock_stats(class);
        }
        return count;
}
static int lock_stat_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        vfree(seq->private);
        seq->private = NULL;
        return seq_release(inode, file);
}
static const struct file_operations proc_lock_stat_operations = {
        .open           = lock_stat_open,
        .write          = lock_stat_write,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */
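
/*
 * Register the proc entries at boot.  All three files are created with
 * mode S_IRUSR, i.e. readable by root only; /proc/lock_stat additionally
 * accepts writes (see lock_stat_write() above) for resetting the counters.
 */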
static int __init lockdep_proc_init(void)
{
        struct proc_dir_entry *entry;

        entry = create_proc_entry("lockdep", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_operations;

        entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lockdep_stats_operations;

#ifdef CONFIG_LOCK_STAT
        entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
        if (entry)
                entry->proc_fops = &proc_lock_stat_operations;
#endif

        return 0;
}

__initcall(lockdep_proc_init);