/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm/mce.h>
#include <asm/intel_arch_perfmon.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 *  - different performance counters / event selects may be reserved by
 *    different subsystems; this reservation system just tries to
 *    coordinate things a little.
 */
static DEFINE_PER_CPU(unsigned, perfctr_nmi_owner);
static DEFINE_PER_CPU(unsigned, evntsel_nmi_owner[2]);

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
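/* For illustration (assuming the usual P4 MSR numbering, where
 * MSR_P4_BSU_ESCR0 is 0x3a0 and MSR_P4_CRU_ESCR5 is 0x3e1):
 *
 *	nmi_evntsel_msr_to_bit(MSR_P4_CRU_ESCR5) = 0x3e1 - 0x3a0 = 65
 *
 * so 66 bits are enough to cover every reservation bit we can hand out.
 */
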
/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;
static int panic_on_timeout;

struct nmi_watchdog_ctlblk {
	int enabled;
	u64 check_bit;
	unsigned int cccr_msr;
	unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
	unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};
static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_PERFCTR0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_PERFCTR0);
		else
			return (msr - MSR_P4_BPU_PERFCTR0);
	}
	return 0;
}

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return (msr - MSR_K7_EVNTSEL0);
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return (msr - MSR_ARCH_PERFMON_EVENTSEL0);
		else
			return (msr - MSR_P4_BSU_ESCR0);
	}
	return 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return (!test_bit(counter, &__get_cpu_var(perfctr_nmi_owner)));
}

int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(perfctr_nmi_owner)))
		return 1;
	return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &__get_cpu_var(perfctr_nmi_owner));
}

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	if (!test_and_set_bit(counter, &__get_cpu_var(evntsel_nmi_owner)))
		return 1;
	return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);
	clear_bit(counter, &__get_cpu_var(evntsel_nmi_owner));
}
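/* A rough usage sketch of the reservation API above (this just mirrors what
 * the watchdog setup routines below and oprofile do; the MSR names are the
 * K7 ones used later in this file):
 *
 *	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *		return 0;			// someone else owns the counter
 *	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *		release_perfctr_nmi(MSR_K7_PERFCTR0);
 *		return 0;
 *	}
 *	// ... program the counter ...
 *	// and on teardown:
 *	release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *	release_perfctr_nmi(MSR_K7_PERFCTR0);
 */
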
static __cpuinit inline int nmi_known_cpu(void)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		return boot_cpu_data.x86 == 15;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return 1;
		return (boot_cpu_data.x86 == 15);
	}
	return 0;
}

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	if (nmi_known_cpu())
		nmi_watchdog = NMI_LOCAL_APIC;
	else
		nmi_watchdog = NMI_IO_APIC;
}

/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test make them busy.
 */
#ifdef CONFIG_SMP
static __init void nmi_cpu_busy(void *data)
{
	volatile int *endflag = data;
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat less cycles. */
	while (*endflag == 0)
		mb();
}
#endif

int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	mdelay((10*1000)/nmi_hz); // wait 10 ticks

	for_each_online_cpu(cpu) {
		if (!per_cpu(nmi_watchdog_ctlblk, cpu).enabled)
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu,
				counts[cpu],
				cpu_pda(cpu)->__nmi_count);
			per_cpu(nmi_watchdog_ctlblk, cpu).enabled = 0;
			atomic_dec(&nmi_active);
		}
	}
	endflag = 1;
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		return -1;
	}
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC) {
		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

		nmi_hz = 1;
		/*
		 * On Intel CPUs with ARCH_PERFMON only 32 bits in the counter
		 * are writable, with higher bits sign extending from bit 31.
		 * So we can only program the counter with 31 bit values; find
		 * the smallest nmi_hz for which the period still fits.
		 */
		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0 &&
		    ((u64)cpu_khz * 1000) > 0x7fffffffULL) {
			nmi_hz = ((u64)cpu_khz * 1000) / 0x7fffffffUL + 1;
		}
	}

	kfree(counts);
	return 0;
}
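/* Worked example for the nmi_hz clamp above (the 3 GHz figure is just an
 * illustrative assumption): cpu_khz * 1000 = 3,000,000,000 cycles/second,
 * which is larger than 0x7fffffff (2,147,483,647), so a one-second period
 * would not fit in 31 bits.  The clamp then yields
 *
 *	nmi_hz = 3,000,000,000 / 0x7fffffff + 1 = 2
 *
 * and the programmed period becomes 3,000,000,000 / 2 = 1,500,000,000,
 * which does fit in 31 bits and sign-extends correctly when written to the
 * 32-bit-writable counter.
 */
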
int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str,"panic",5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}
	get_option(&str, &nmi);
	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
		return 0;  /* no lapic support */
	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);

static void disable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
	BUG_ON(atomic_read(&nmi_active) != 0);
}

static void enable_lapic_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

	/* are we already enabled */
	if (atomic_read(&nmi_active) != 0)
		return;

	/* are we lapic aware */
	if (nmi_known_cpu() <= 0)
		return;

	on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
	touch_nmi_watchdog();
}

void disable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) <= 0)
		return;

	disable_irq(0);
	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
	BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_timer_nmi_watchdog(void)
{
	BUG_ON(nmi_watchdog != NMI_IO_APIC);

	if (atomic_read(&nmi_active) == 0) {
		touch_nmi_watchdog();
		on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
		enable_irq(0);
	}
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON but b/c this is an
	 * init call, it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;
	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}
/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

/*
 * Activate the NMI watchdog via the local APIC.
 * Original code written by Keith Owens.
 */

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
#define K7_NMI_EVENT		K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	perfctr_msr = MSR_K7_PERFCTR0;
	evntsel_msr = MSR_K7_EVNTSEL0;
	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	/* Simulator may not support it */
	if (checking_wrmsrl(evntsel_msr, 0UL))
		goto fail2;
	wrmsrl(perfctr_msr, 0UL);

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << 63;
	return 1;
fail2:
	release_evntsel_nmi(evntsel_msr);
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}
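/* A quick sanity check of the period programmed above, under the assumption
 * of a 2 GHz K7-class CPU (cpu_khz = 2,000,000) and the boot-time nmi_hz of
 * HZ = 1000:
 *
 *	-((u64)cpu_khz * 1000 / nmi_hz) = -(2,000,000,000 / 1000) = -2,000,000
 *
 * The counter therefore counts up from -2,000,000 and overflows (raising the
 * NMI) after about 2,000,000 unhalted cycles, i.e. roughly once per
 * millisecond while the CPU is busy.  Until it overflows the value read back
 * still has the sign bit set, which is what the check_bit test in
 * nmi_watchdog_tick() below uses to tell watchdog NMIs apart from other
 * NMI sources.
 */
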
static void stop_k7_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

/* Note that these events don't tick when the CPU idles. This means
   the frequency varies with CPU load. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
#define P4_ESCR_EVENT_SELECT(N)	((N)<<25)
#define P4_ESCR_OS		(1<<3)
#define P4_ESCR_USR		(1<<2)
#define P4_CCCR_OVF_PMI0	(1<<26)
#define P4_CCCR_OVF_PMI1	(1<<27)
#define P4_CCCR_THRESHOLD(N)	((N)<<20)
#define P4_CCCR_COMPLEMENT	(1<<19)
#define P4_CCCR_COMPARE		(1<<18)
#define P4_CCCR_REQUIRED	(3<<16)
#define P4_CCCR_ESCR_SELECT(N)	((N)<<13)
#define P4_CCCR_ENABLE		(1<<12)
#define P4_CCCR_OVF		(1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
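/* Roughly why the CCCR setup below acts as a clock: with P4_CCCR_COMPARE and
 * P4_CCCR_COMPLEMENT set and P4_CCCR_THRESHOLD(15), the counter increments
 * whenever the per-cycle event count is less than or equal to 15 -- which is
 * every cycle, whatever CRU_ESCR0 actually selects.  The perfctr is then
 * seeded with -(cpu_khz * 1000 / nmi_hz) so it overflows, and raises the
 * PMI-as-NMI configured via P4_CCCR_OVF_PMI0/1, about nmi_hz times a second
 * while the CPU is busy.
 */
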
static int setup_p4_watchdog(void)
{
	unsigned int perfctr_msr, evntsel_msr, cccr_msr;
	unsigned int evntsel, cccr_val;
	unsigned int misc_enable, dummy;
	unsigned int ht_num;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
	if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
		return 0;

#ifdef CONFIG_SMP
	/* detect which hyperthread we are on */
	if (smp_num_siblings == 2) {
		unsigned int ebx, apicid;

		ebx = cpuid_ebx(1);
		apicid = (ebx >> 24) & 0xff;
		ht_num = apicid & 1;
	} else
#endif
		ht_num = 0;

	/* performance counters are shared resources
	 * assign each hyperthread its own set
	 * (re-use the ESCR0 register, seems safe
	 * and keeps the cccr_val the same)
	 */
	if (!ht_num) {
		/* logical cpu 0 */
		perfctr_msr = MSR_P4_IQ_PERFCTR0;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR0;
		cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
	} else {
		/* logical cpu 1 */
		perfctr_msr = MSR_P4_IQ_PERFCTR1;
		evntsel_msr = MSR_P4_CRU_ESCR0;
		cccr_msr = MSR_P4_IQ_CCCR1;
		cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
	}

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	evntsel = P4_ESCR_EVENT_SELECT(0x3F)
		| P4_ESCR_OS
		| P4_ESCR_USR;

	cccr_val |= P4_CCCR_THRESHOLD(15)
		 | P4_CCCR_COMPLEMENT
		 | P4_CCCR_COMPARE
		 | P4_CCCR_REQUIRED;

	wrmsr(evntsel_msr, evntsel, 0);
	wrmsr(cccr_msr, cccr_val, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	cccr_val |= P4_CCCR_ENABLE;
	wrmsr(cccr_msr, cccr_val, 0);
	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = cccr_msr;
	wd->check_bit = 1ULL << 39;
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_p4_watchdog(void)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	wrmsr(wd->cccr_msr, 0, 0);
	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int perfctr_msr, evntsel_msr;
	unsigned int evntsel;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return 0;

	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;

	if (!reserve_perfctr_nmi(perfctr_msr))
		goto fail;

	if (!reserve_evntsel_nmi(evntsel_msr))
		goto fail1;

	wrmsrl(perfctr_msr, 0UL);

	evntsel = ARCH_PERFMON_EVENTSEL_INT
		| ARCH_PERFMON_EVENTSEL_OS
		| ARCH_PERFMON_EVENTSEL_USR
		| ARCH_PERFMON_NMI_EVENT_SEL
		| ARCH_PERFMON_NMI_EVENT_UMASK;

	/* setup the timer */
	wrmsr(evntsel_msr, evntsel, 0);
	wrmsrl(perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsr(evntsel_msr, evntsel, 0);

	wd->perfctr_msr = perfctr_msr;
	wd->evntsel_msr = evntsel_msr;
	wd->cccr_msr = 0;  /* unused */
	wd->check_bit = 1ULL << (eax.split.bit_width - 1);
	return 1;
fail1:
	release_perfctr_nmi(perfctr_msr);
fail:
	return 0;
}

static void stop_intel_arch_watchdog(void)
{
	unsigned int ebx;
	union cpuid10_eax eax;
	unsigned int unused;
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/*
	 * Check whether the Architectural PerfMon supports
	 * Unhalted Core Cycles Event or not.
	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
	 */
	cpuid(10, &(eax.full), &ebx, &unused, &unused);
	if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		return;

	wrmsr(wd->evntsel_msr, 0, 0);

	release_evntsel_nmi(wd->evntsel_msr);
	release_perfctr_nmi(wd->perfctr_msr);
}

void setup_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (wd->enabled == 1)
		return;

	/* cheap hack to support suspend/resume */
	/* if cpu0 is not active neither should the other cpus */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			if (!setup_k7_watchdog())
				return;
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				if (!setup_intel_arch_watchdog())
					return;
				break;
			}
			if (!setup_p4_watchdog())
				return;
			break;
		default:
			return;
		}
	}
	wd->enabled = 1;
	atomic_inc(&nmi_active);
}

void stop_apic_nmi_watchdog(void *unused)
{
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (wd->enabled == 0)
		return;

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (strstr(boot_cpu_data.x86_model_id, "Screwdriver"))
				return;
			stop_k7_watchdog();
			break;
		case X86_VENDOR_INTEL:
			if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
				stop_intel_arch_watchdog();
				break;
			}
			stop_p4_watchdog();
			break;
		default:
			return;
		}
	}
	wd->enabled = 0;
	atomic_dec(&nmi_active);
}

/*
 * the best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * as these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);
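/* In outline, nmi_watchdog_tick() below implements that check roughly as
 * follows (this is only a summary of the real code further down, not a
 * separate mechanism):
 *
 *	sum = read_pda(apic_timer_irqs);
 *	if (the timer count is unchanged since the last NMI and nobody has
 *	    called touch_nmi_watchdog())
 *		increment this CPU's alert_counter;
 *	else
 *		remember the new count and reset alert_counter;
 *	if (alert_counter reaches 5*nmi_hz)	// ~5 seconds of silence
 *		die_nmi(...);
 */
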
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;
		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}
	touch_softlockup_watchdog();
}

int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
	u64 dummy;
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to. */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (wd->enabled) {
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			rdmsrl(wd->perfctr_msr, dummy);
			if (dummy & wd->check_bit) {
				/* this wasn't a watchdog timer interrupt */
				goto done;
			}

			/* only Intel uses the cccr msr */
			if (wd->cccr_msr != 0) {
				/*
				 * P4 quirks:
				 * - An overflown perfctr will assert its interrupt
				 *   until the OVF flag in its CCCR is cleared.
				 * - LVTPC is masked on interrupt and must be
				 *   unmasked by the LVTPC handler.
				 */
				rdmsrl(wd->cccr_msr, dummy);
				dummy &= ~P4_CCCR_OVF;
				wrmsrl(wd->cccr_msr, dummy);
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
				/*
				 * ArchPerfmon/Core Duo needs to re-unmask
				 * the apic vector
				 */
				apic_write(APIC_LVTPC, APIC_DM_NMI);
			}
			/* start the cycle over again */
			wrmsrl(wd->perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
			rc = 1;
		} else if (nmi_watchdog == NMI_IO_APIC) {
			/* don't know how to accurately check for this.
			 * just assume it was a watchdog timer interrupt
			 * This matches the old behaviour.
			 */
			rc = 1;
		} else
			printk(KERN_WARNING "Unknown enabled NMI hardware?!\n");
	}
done:
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count,1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs * regs, int cpu)
{
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
	return 0;
}

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];
	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1); /* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);