/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/smp.h>
#include <asm/reboot.h>

#include <mach_ipi.h>
/* This keeps track of which cpu is the crashing one. */
static int crashing_cpu;
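
/*
 * With SMP and a local APIC the crashing cpu stops the others by sending
 * them an NMI; each cpu saves its register state in the NMI handler and
 * then halts. Without that support, nmi_shootdown_cpus() is a no-op.
 */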
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	struct pt_regs *regs;
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;
#endif
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	regs = ((struct die_args *)data)->regs;
	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on crashing cpu.
	 * Otherwise, system will completely hang. Crashing cpu can get
	 * an NMI if system was initially booted with nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

#ifdef CONFIG_X86_32
	if (!user_mode_vm(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}
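
/*
 * Send an NMI IPI to every online cpu except the one executing this code,
 * forcing each of them into crash_nmi_callback() above.
 */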
static void smp_send_nmi_allbutself(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(safe_smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_IPI_mask(mask, NMI_VECTOR);
}
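
/* Die notifier used to hook the crash NMI into crash_nmi_callback(). */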
static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};
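
/*
 * Halt all other cpus: register the NMI callback, send the NMI IPIs, then
 * wait up to one second for every cpu to check in before carrying on.
 */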
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */

	/* Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif
void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of crashing cpu. Will be used in NMI callback.*/
	crashing_cpu = safe_smp_processor_id();
	nmi_shootdown_cpus();
	lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}