#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
+#include <linux/utsname.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
/*
- * hash_lock: protects the lockdep hashes and class/list/hash allocators.
+ * lockdep_lock: protects the lockdep graph, the hashes and the
+ * class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
* to use a raw spinlock - we really dont want the spinlock
- * code to recurse back into the lockdep code.
+ * code to recurse back into the lockdep code...
*/
-static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int graph_lock(void)
+{
+ __raw_spin_lock(&lockdep_lock);
+ /*
+ * Make sure that if another CPU detected a bug while
+ * walking the graph we don't change it (while the other
+ * CPU is busy printing out stuff with the graph lock
+ * dropped already)
+ */
+ if (!debug_locks) {
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+ }
+ return 1;
+}
+
+static inline int graph_unlock(void)
+{
+ __raw_spin_unlock(&lockdep_lock);
+ return 0;
+}
+
+/*
+ * Turn lock debugging off and return 0 if it was off already,
+ * and also release the graph lock:
+ */
+static inline int debug_locks_off_graph_unlock(void)
+{
+ int ret = debug_locks_off();
+
+ __raw_spin_unlock(&lockdep_lock);
+
+ return ret;
+}
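
The convention these helpers establish is that every graph_lock() return value gets checked, so a CPU that finds lock debugging already disabled backs out instead of touching the graph. A minimal sketch of the calling pattern (example_graph_update() is hypothetical, not part of the patch):

/*
 * Hypothetical caller: bail out if another CPU has already turned
 * the validator off, otherwise do the work under lockdep_lock.
 */
static int example_graph_update(void)
{
	if (!graph_lock())
		return 0;

	/* ... walk or modify the dependency graph here ... */

	graph_unlock();
	return 1;
}
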
static int lockdep_initialized;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
- * Allocate a lockdep entry. (assumes hash_lock held, returns
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return NULL;
+
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
* unique.
*/
#define iterate_chain_key(key1, key2) \
- (((key1) << MAX_LOCKDEP_KEYS_BITS/2) ^ \
- ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS/2)) ^ \
+ (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
+ ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
(key2))
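
For context, the chain key that this macro mixes is accumulated over the class indices of all locks currently held by the task, roughly as below (a simplified sketch of what check_chain_key() does, assuming the held_locks[]/lock_classes[] layout used elsewhere in this file):

/*
 * Simplified sketch (not part of the patch): fold each held lock's
 * class index into the running 64-bit chain key.
 */
static u64 example_chain_key(struct task_struct *curr)
{
	u64 chain_key = 0;
	unsigned int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		/* class index is the offset of the class in lock_classes[] */
		u64 id = curr->held_locks[i].class - lock_classes;

		chain_key = iterate_chain_key(chain_key, id);
	}
	return chain_key;
}
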
void lockdep_off(void)
EXPORT_SYMBOL(lockdep_on);
-int lockdep_internal(void)
-{
- return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
/*
* Debugging switches:
*/
#define VERBOSE 0
-#ifdef VERBOSE
-# define VERY_VERBOSE 0
-#endif
+#define VERY_VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
*/
static int class_filter(struct lock_class *class)
{
+#if 0
+ /* Example */
if (class->name_version == 1 &&
- !strcmp(class->name, "&rl->lock"))
- return 1;
- if (class->name_version == 1 &&
- !strcmp(class->name, "&ni->mrec_lock"))
- return 1;
- if (class->name_version == 1 &&
- !strcmp(class->name, "mft_ni_runlist_lock"))
- return 1;
- if (class->name_version == 1 &&
- !strcmp(class->name, "mft_ni_mrec_lock"))
+ !strcmp(class->name, "lockname"))
return 1;
if (class->name_version == 1 &&
- !strcmp(class->name, "&vol->lcnbmp_lock"))
+ !strcmp(class->name, "&struct->lockfield"))
return 1;
+#endif
+ /* Filter everything else. Returning 1 here would allow everything else. */
return 0;
}
#endif
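
When VERBOSE is set to 1, class_filter() decides which lock classes produce the extra debugging output; the #if 0 block above is the template to copy, matching on class->name (and on name_version when several classes share a name) and returning 1 for the classes of interest.
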
/*
* Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
+ * addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
trace->entries = stack_trace + nr_stack_trace_entries;
- save_stack_trace(trace, NULL, 0, 3);
+ trace->skip = 3;
+ trace->all_contexts = 0;
+
+ save_stack_trace(trace, NULL);
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
- return 0;
if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_off()) {
- printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
- }
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
+ printk("turning off the locking correctness validator.\n");
+ dump_stack();
+
return 0;
}
static void print_lock_name(struct lock_class *class)
{
- char str[128], c1, c2, c3, c4;
+ char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
const char *name;
get_usage_chars(class, &c1, &c2, &c3, &c4);
static void print_lockdep_cache(struct lockdep_map *lock)
{
const char *name;
- char str[128];
+ char str[KSYM_NAME_LEN + 1];
name = lock->name;
if (!name)
print_lock(curr->held_locks + i);
}
}
-/*
- * Helper to print a nice hierarchy of lock dependencies:
- */
-static void print_spaces(int nr)
-{
- int i;
-
- for (i = 0; i < nr; i++)
- printk(" ");
-}
static void print_lock_class_header(struct lock_class *class, int depth)
{
int bit;
- print_spaces(depth);
- printk("->");
+ printk("%*s->", depth, "");
print_lock_name(class);
printk(" ops: %lu", class->ops);
printk(" {\n");
if (class->usage_mask & (1 << bit)) {
int len = depth;
- print_spaces(depth);
- len += printk(" %s", usage_str[bit]);
+ len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(" at:\n");
print_stack_trace(class->usage_traces + bit, len);
}
}
- print_spaces(depth);
- printk(" }\n");
+ printk("%*s }\n", depth, "");
- print_spaces(depth);
- printk(" ... key at: ");
+ printk("%*s ... key at: ",depth,"");
print_ip_sym((unsigned long)class->key);
}
print_lock_class_header(class, depth);
list_for_each_entry(entry, &class->locks_after, entry) {
- DEBUG_LOCKS_WARN_ON(!entry->class);
+ if (DEBUG_LOCKS_WARN_ON(!entry->class))
+ return;
+
print_lock_dependencies(entry->class, depth + 1);
- print_spaces(depth);
- printk(" ... acquired at:\n");
+ printk("%*s ... acquired at:\n",depth,"");
print_stack_trace(&entry->trace, 2);
printk("\n");
}
return 0;
entry->class = this;
- save_trace(&entry->trace);
+ if (!save_trace(&entry->trace))
+ return 0;
/*
* Since we never remove from the dependency list, the list can
return 0;
}
+static void print_kernel_version(void)
+{
+ printk("%s %.*s\n", init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+}
+
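
print_kernel_version() prints the release string followed by the leading build-count token of the version string (strcspn() stops at the first space), so the reports below start with something like "2.6.19 #1".
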
/*
* When a circular dependency is detected, print the
* header first:
{
struct task_struct *curr = current;
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=======================================================\n");
printk( "[ INFO: possible circular locking dependency detected ]\n");
+ print_kernel_version();
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, curr->pid);
return 0;
this.class = check_source->class;
- save_trace(&this.trace);
+ if (!save_trace(&this.trace))
+ return 0;
+
print_circular_bug_entry(&this, 0);
printk("\nother info that might help us debug this:\n\n");
return 0;
}
+#define RECURSION_LIMIT 40
+
static int noinline print_infinite_recursion_bug(void)
{
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
debug_atomic_inc(&nr_cyclic_check_recursions);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
- if (depth >= 20)
+ if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
/*
* Check this lock's dependency list:
if (depth > max_recursion_depth)
max_recursion_depth = depth;
- if (depth >= 20)
+ if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
debug_atomic_inc(&nr_find_usage_forwards_checks);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
- if (depth >= 20)
+ if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
debug_atomic_inc(&nr_find_usage_backwards_checks);
enum lock_usage_bit bit2,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n======================================================\n");
printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
+ print_kernel_version();
printk( "------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, curr->pid,
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
- debug_locks_off();
- __raw_spin_unlock(&hash_lock);
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=============================================\n");
printk( "[ INFO: possible recursive locking detected ]\n");
+ print_kernel_version();
printk( "---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, curr->pid);
&prev->class->locks_after, next->acquire_ip);
if (!ret)
return 0;
- /*
- * Return value of 2 signals 'dependency already added',
- * in that case we dont have to add the backlink either.
- */
- if (ret == 2)
- return 2;
+
ret = add_lock_to_list(next->class, prev->class,
&next->class->locks_before, next->acquire_ip);
+ if (!ret)
+ return 0;
/*
* Debugging printouts:
*/
if (verbose(prev->class) || verbose(next->class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
printk("\n new dependency: ");
print_lock_name(prev->class);
printk(" => ");
print_lock_name(next->class);
printk("\n");
dump_stack();
- __raw_spin_lock(&hash_lock);
+ return graph_lock();
}
return 1;
}
* added:
*/
if (hlock->read != 2) {
- check_prev_add(curr, hlock, next);
+ if (!check_prev_add(curr, hlock, next))
+ return 0;
/*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
}
return 1;
out_bug:
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
+ WARN_ON(1);
return 0;
}
*/
for_each_possible_cpu(i) {
start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
- end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
+ end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+ + per_cpu_offset(i);
if ((addr >= start) && (addr < end))
return 1;
return count + 1;
}
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
* itself, so actual lookup of the hash should be once per lock object.
*/
static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct list_head *hash_head;
* (or spin_lock_init()) call - which acts as the key. For static
* locks we use the lock object itself as the key.
*/
- if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
- __error_too_big_MAX_LOCKDEP_SUBCLASSES();
+ BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
key = lock->key->subkeys + subclass;
*/
list_for_each_entry(class, hash_head, hash_entry)
if (class->key == key)
- goto out_set;
+ return class;
+
+ return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
+{
+ struct lockdep_subclass_key *key;
+ struct list_head *hash_head;
+ struct lock_class *class;
+ unsigned long flags;
+
+ class = look_up_lock_class(lock, subclass);
+ if (likely(class))
+ return class;
/*
* Debug-check: all keys must be persistent!
return NULL;
}
- __raw_spin_lock(&hash_lock);
+ key = lock->key->subkeys + subclass;
+ hash_head = classhashentry(key);
+
+ raw_local_irq_save(flags);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
+ raw_local_irq_restore(flags);
+
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
list_add_tail_rcu(&class->hash_entry, hash_head);
if (verbose(class)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
+ raw_local_irq_restore(flags);
+
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
- __raw_spin_lock(&hash_lock);
+
+ raw_local_irq_save(flags);
+ if (!graph_lock()) {
+ raw_local_irq_restore(flags);
+ return NULL;
+ }
}
out_unlock_set:
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
+ raw_local_irq_restore(flags);
-out_set:
- lock->class[subclass] = class;
+ if (!subclass || force)
+ lock->class_cache = class;
DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
* add it and return 0 - in this case the new dependency chain is
* validated. If the key is already hashed, return 1.
*/
-static inline int lookup_chain_cache(u64 chain_key)
+static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
{
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(&chain_lookup_hits);
- /*
- * In the debugging case, force redundant checking
- * by returning 1:
- */
-#ifdef CONFIG_DEBUG_LOCKDEP
- __raw_spin_lock(&hash_lock);
- return 1;
-#endif
+ if (very_verbose(class))
+ printk("\nhash chain already cached, key: "
+ "%016Lx tail class: [%p] %s\n",
+ (unsigned long long)chain_key,
+ class->key, class->name);
return 0;
}
}
+ if (very_verbose(class))
+ printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
+ (unsigned long long)chain_key, class->key, class->name);
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
+
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
struct held_lock *this, int forwards,
const char *irqclass)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=========================================================\n");
printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
+ print_kernel_version();
printk( "---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, curr->pid);
return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}
-static inline void print_irqtrace_events(struct task_struct *curr)
+void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
print_ip_sym(curr->softirq_disable_ip);
}
-#else
-static inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
#endif
static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
- __raw_spin_unlock(&hash_lock);
- debug_locks_off();
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=================================\n");
printk( "[ INFO: inconsistent lock state ]\n");
+ print_kernel_version();
printk( "---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
if (likely(this->class->usage_mask & new_mask))
return 1;
- __raw_spin_lock(&hash_lock);
+ if (!graph_lock())
+ return 0;
/*
* Make sure we didnt race:
*/
if (unlikely(this->class->usage_mask & new_mask)) {
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
return 1;
}
debug_atomic_dec(&nr_unused_locks);
break;
default:
- debug_locks_off();
+ if (!debug_locks_off_graph_unlock())
+ return 0;
WARN_ON(1);
return 0;
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
/*
- * We must printk outside of the hash_lock:
+ * We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
* Initialize a lock instance's lock-class mapping info:
*/
void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key)
+ struct lock_class_key *key, int subclass)
{
if (unlikely(!debug_locks))
return;
}
lock->name = name;
lock->key = key;
- memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+ lock->class_cache = NULL;
+ if (subclass)
+ register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
unsigned long ip)
{
struct task_struct *curr = current;
+ struct lock_class *class = NULL;
struct held_lock *hlock;
- struct lock_class *class;
unsigned int depth, id;
int chain_head = 0;
u64 chain_key;
return 0;
}
- class = lock->class[subclass];
- /* not cached yet? */
+ if (!subclass)
+ class = lock->class_cache;
+ /*
+ * Not cached yet or subclass?
+ */
if (unlikely(!class)) {
- class = register_lock_class(lock, subclass);
+ class = register_lock_class(lock, subclass, 0);
if (!class)
return 0;
}
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
- * hash_lock for us)
+ * graph_lock for us)
*/
- if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
+ if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
/*
* Check whether last held lock:
*
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
}
curr->lockdep_depth++;
check_chain_key(curr);
void lockdep_reset(void)
{
unsigned long flags;
+ int i;
raw_local_irq_save(flags);
current->curr_chain_key = 0;
nr_softirq_chains = 0;
nr_process_chains = 0;
debug_locks = 1;
+ for (i = 0; i < CHAINHASH_SIZE; i++)
+ INIT_LIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
int i;
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
+ graph_lock();
/*
* Unhash all classes that were created by this module:
zap_class(class);
}
- __raw_spin_unlock(&hash_lock);
+ graph_unlock();
raw_local_irq_restore(flags);
}
void lockdep_reset_lock(struct lockdep_map *lock)
{
- struct lock_class *class, *next, *entry;
+ struct lock_class *class, *next;
struct list_head *head;
unsigned long flags;
int i, j;
raw_local_irq_save(flags);
- __raw_spin_lock(&hash_lock);
/*
- * Remove all classes this lock has:
+ * Remove all classes this lock might have:
+ */
+ for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+ /*
+ * If the class exists we look it up and zap it:
+ */
+ class = look_up_lock_class(lock, j);
+ if (class)
+ zap_class(class);
+ }
+ /*
+ * Debug check: in the end all mapped classes should
+ * be gone.
*/
+ graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
- for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
- entry = lock->class[j];
- if (class == entry) {
- zap_class(class);
- lock->class[j] = NULL;
- break;
- }
+ if (unlikely(class == lock->class_cache)) {
+ if (debug_locks_off_graph_unlock())
+ WARN_ON(1);
+ goto out_restore;
}
}
}
+ graph_unlock();
- /*
- * Debug check: in the end all mapped classes should
- * be gone.
- */
- for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
- entry = lock->class[j];
- if (!entry)
- continue;
- __raw_spin_unlock(&hash_lock);
- DEBUG_LOCKS_WARN_ON(1);
- raw_local_irq_restore(flags);
- return;
- }
-
- __raw_spin_unlock(&hash_lock);
+out_restore:
raw_local_irq_restore(flags);
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
- const void *mem_to)
+ const void *mem_to, struct held_lock *hlock)
{
if (!debug_locks_off())
return;
printk( "-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, curr->pid, mem_from, mem_to-1);
+ print_lock(hlock);
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
!in_range(mem_from, lock_to, mem_to))
continue;
- print_freed_lock_bug(curr, mem_from, mem_to);
+ print_freed_lock_bug(curr, mem_from, mem_to, hlock);
break;
}
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
static void print_held_locks_bug(struct task_struct *curr)
{