/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

#ifdef CONFIG_LOCKDEP

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
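
/*
 * Illustrative sketch (not part of this API): a subsystem that wants a
 * whole family of dynamically allocated locks to share one class can
 * declare a single static key - its unique .data address identifies
 * the class. "my_obj" and "my_obj_lock_key" are hypothetical names:
 *
 *	static struct lock_class_key my_obj_lock_key;
 *
 *	void my_obj_init(struct my_obj *obj)
 *	{
 *		spin_lock_init(&obj->lock);
 *		lockdep_set_class(&obj->lock, &my_obj_lock_key);
 *	}
 */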

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
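
/*
 * Sketch of how a locking primitive embeds this map (simplified; the
 * real spinlock_t/mutex definitions differ and are CONFIG-dependent):
 *
 *	struct my_lock {
 *		raw_spinlock_t		raw_lock;
 *		struct lockdep_map	dep_map;
 *	};
 *
 * The member name matters: the lockdep_set_class() helpers below
 * expect the field to be called "dep_map".
 */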

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};
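
/*
 * Sketch of the chain-key idea (illustrative pseudo-code only; the
 * real folding function lives in kernel/lockdep.c): each acquired
 * class is hashed into the running 64-bit per-task key, so every
 * distinct sequence of held locks yields, with high probability, a
 * distinct key that can be looked up in the chain cache:
 *
 *	u64 chain_key = 0;	// per task, restarts at 0 on context cross
 *	for each lock L acquired, in order:
 *		chain_key = hash_fold(chain_key, class_id_of(L));
 *
 * (hash_fold() and class_id_of() are placeholder names.)
 */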

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
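
/*
 * Example (sketch): when a range of kernel memory that contained
 * lock-class keys goes away - e.g. a module's .data section on
 * unload - the stale classes must be released, roughly:
 *
 *	lockdep_free_key_range(mod_data_start, mod_data_size);
 *
 * (mod_data_start/mod_data_size are placeholder names.)
 */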

extern void lockdep_off(void);
extern void lockdep_on(void);
extern int lockdep_internal(void);
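
/*
 * Example (sketch): lockdep_off()/lockdep_on() disable and re-enable
 * lock tracking for the current task, e.g. around code that would
 * otherwise recurse into the validator:
 *
 *	lockdep_off();
 *	... locking here is invisible to the validator ...
 *	lockdep_on();
 */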

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key);

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization makes the validator get the scope of dependencies
 * wrong: either too broad (the class needs a split) or too narrow
 * (the class suffers from a false split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key)
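
/*
 * Example (sketch): two semantically different users of the same lock
 * type can be split into separate classes to avoid false positives.
 * The key name below is hypothetical:
 *
 *	static struct lock_class_key inner_lock_key;
 *
 *	spin_lock_init(&obj->inner_lock);
 *	lockdep_set_class(&obj->inner_lock, &inner_lock_key);
 */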

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
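
/*
 * Example (sketch): an exclusive (write), non-trylock acquire with
 * full validation, and the matching nested release:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, _RET_IP_);
 *	...
 *	lock_release(&lock->dep_map, 1, _RET_IP_);
 */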

# define INIT_LOCKDEP				.lockdep_recursion = 0,

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline int lockdep_internal(void)
{
	return 0;
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };
#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
# define early_init_irq_lock_class()		do { } while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
#else
# define early_boot_irqs_off()			do { } while (0)
# define early_boot_irqs_on()			do { } while (0)
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
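
/*
 * Example (sketch): locking a parent and a child object of the same
 * lock-class in a fixed order; the inner lock is annotated with a
 * subclass so the validator does not report a false self-deadlock
 * (double_lock() is a hypothetical helper):
 *
 *	static void double_lock(struct my_obj *parent, struct my_obj *child)
 *	{
 *		spin_lock(&parent->lock);
 *		spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	}
 */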

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
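
/*
 * Sketch of how a lock implementation consumes these hooks (simplified
 * from the actual spinlock code, which has more variants):
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		_raw_spin_lock(lock);
 *	}
 */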

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */