include/linux/smp_lock.h
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

/*
 * A task's lock_depth is -1 while it holds no reference to the Big
 * Kernel Lock (BKL) and counts recursive acquisitions otherwise, so a
 * non-negative depth means "current holds the BKL".
 */
#define kernel_locked()         (current->lock_depth >= 0)

/*
 * Low-level helpers behind the scheduler wrappers below; callers
 * should use release_kernel_lock()/reacquire_kernel_lock() rather
 * than these directly.
 */
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);
/*
 * Release/re-acquire global kernel lock for the scheduler
 */
#define release_kernel_lock(tsk) do {           \
        if (unlikely((tsk)->lock_depth >= 0))   \
                __release_kernel_lock();        \
} while (0)

static inline int reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                return __reacquire_kernel_lock();
        return 0;
}

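/*
 * Illustrative sketch (not actual scheduler code): around a context
 * switch the scheduler drops the BKL on behalf of the task being
 * switched out and re-takes it for the task coming back in, roughly:
 *
 *      release_kernel_lock(prev);
 *      ...perform the context switch...
 *      if (unlikely(reacquire_kernel_lock(current) < 0))
 *              ...retry...
 *
 * Only release_kernel_lock() and reacquire_kernel_lock() come from
 * this header; the surrounding control flow is hypothetical.
 */
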
extern void __lockfunc lock_kernel(void)        __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)      __releases(kernel_lock);

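/*
 * Sketch of the BKL's recursive behaviour, with a hypothetical caller;
 * lock_depth counts the nesting, and the lock is only dropped by the
 * outermost unlock_kernel():
 *
 *      lock_kernel();          depth -1 -> 0, lock acquired
 *      lock_kernel();          depth  0 -> 1, nested, no deadlock
 *      unlock_kernel();        depth  1 -> 0, still held
 *      unlock_kernel();        depth  0 -> -1, released
 */
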
/*
 * Various legacy drivers don't really need the BKL in a specific
 * function, but they *do* need to know that the BKL became available.
 * This function just avoids wrapping a bunch of lock/unlock pairs
 * around code which doesn't really need it.
 */
static inline void cycle_kernel_lock(void)
{
        lock_kernel();
        unlock_kernel();
}
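
/*
 * Usage sketch with a hypothetical legacy character driver: an open
 * routine that only needs to wait until BKL-holding initialization
 * elsewhere has finished can cycle the lock instead of holding it
 * across the whole open path:
 *
 *      static int foo_open(struct inode *inode, struct file *file)
 *      {
 *              cycle_kernel_lock();
 *              return 0;
 *      }
 *
 * foo_open() is illustrative; only cycle_kernel_lock() comes from
 * this header.
 */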

#else

/*
 * Without CONFIG_LOCK_KERNEL the BKL compiles away: the lock/unlock
 * operations become no-ops and kernel_locked() reports 1, since
 * nothing can contend for the lock in this configuration.
 */
#define lock_kernel()                           do { } while (0)
#define unlock_kernel()                         do { } while (0)
#define release_kernel_lock(task)               do { } while (0)
#define cycle_kernel_lock()                     do { } while (0)
#define reacquire_kernel_lock(task)             0
#define kernel_locked()                         1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */