linux-2.6: kernel/softlockup.c
/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, (C) 2005, Red Hat
 *
 * this code detects soft lockups: incidents where the kernel does
 * not reschedule on a CPU for 10 seconds or more.
 */
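
/*
 * Mechanism: every online CPU gets a high-priority watchdog kthread
 * that wakes up once a second and stores the current jiffies value in
 * a per-CPU timestamp.  The timer interrupt compares that timestamp
 * against jiffies: if the watchdog thread has not managed to run for
 * more than 10 seconds, the CPU is considered soft-locked and a
 * warning with a register dump is printed.
 */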

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

static DEFINE_SPINLOCK(print_lock);

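/*
 * Per-CPU state: "timestamp" is the last time the watchdog thread ran
 * on this CPU, "print_timestamp" remembers which timestamp value has
 * already been reported, and "watchdog_task" points at the CPU's
 * watchdog kthread.
 */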
static DEFINE_PER_CPU(unsigned long, timestamp) = 0;
static DEFINE_PER_CPU(unsigned long, print_timestamp) = 0;
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

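/*
 * After a panic has started, suppress further lockup reports - they
 * would only add noise; see the did_panic check in softlockup_tick().
 */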
static int did_panic = 0;
static int softlock_panic(struct notifier_block *this, unsigned long event,
				void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

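/*
 * Reset this CPU's timestamp to "now".  Code that knowingly keeps a
 * CPU busy without scheduling for a long stretch can call this to
 * avoid a false lockup report.
 */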
void touch_softlockup_watchdog(void)
{
	per_cpu(timestamp, raw_smp_processor_id()) = jiffies;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

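/*
 * Illustrative use only - hw_ready() is a made-up helper: a driver
 * that busy-waits on hardware for a long time could keep the
 * watchdog quiet like this:
 *
 *	while (!hw_ready()) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 */
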
/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long timestamp = per_cpu(timestamp, this_cpu);

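	/* This stall was already reported - print each lockup only once: */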
	if (per_cpu(print_timestamp, this_cpu) == timestamp)
		return;

	/* Do not cause a second panic when there already was one */
	if (did_panic)
		return;

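	/* Has the watchdog thread failed to run for over 10 seconds? */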
	if (time_after(jiffies, timestamp + 10*HZ)) {
		per_cpu(print_timestamp, this_cpu) = timestamp;

		spin_lock(&print_lock);
		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
			this_cpu);
		show_regs(regs);
		spin_unlock(&print_lock);
	}
}

/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void * __bind_cpu)
{
	struct sched_param param = { .sched_priority = 99 };

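	/*
	 * Run as the highest-priority SCHED_FIFO task: if even this
	 * thread cannot get onto the CPU, the CPU really is stuck.
	 * PF_NOFREEZE keeps the thread from being frozen at suspend.
	 */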
	sched_setscheduler(current, SCHED_FIFO, &param);
	current->flags |= PF_NOFREEZE;

	set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * Run briefly once per second - if this gets delayed for
	 * more than 10 seconds then the debug-printout triggers
	 * in softlockup_tick():
	 */
	while (!kthread_should_stop()) {
		msleep_interruptible(1000);
		touch_softlockup_watchdog();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __devinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(watchdog_task, hotcpu) = p;
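		/* Bind the thread to its CPU so each CPU has its own watchdog: */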
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

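/*
 * Called once at boot: start the watchdog for the boot CPU by hand,
 * then register the hotplug notifier so threads are created and
 * destroyed as CPUs come and go, and hook into the panic notifier.
 */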
__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();

	cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	notifier_chain_register(&panic_notifier_list, &panic_block);
}