/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <mach_ipi.h>

note_buf_t crash_notes[NR_CPUS];

/* This keeps track of which cpu is the crashing one. */
static int crashing_cpu;

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
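
/*
 * For reference, each record written by append_elf_note() follows the
 * standard ELF note layout, with the name and descriptor independently
 * padded to 4-byte words (hence the "+ 3)/4" rounding on a u32 buffer):
 *
 *	| n_namesz | n_descsz | n_type | name ("CORE\0") + pad | desc + pad |
 *
 * A zero-filled header, written by final_note() below, terminates the list.
 */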

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = &crash_notes[cpu][0];
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
			      sizeof(prstatus));
	final_note(buf);
}
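
/*
 * Nothing in this file consumes crash_notes[]; the per-cpu note buffers
 * filled in above are meant to be read out later by the kexec'd (capture)
 * kernel when it assembles the crash dump.
 */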

static void crash_get_current_regs(struct pt_regs *regs)
{
	__asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
	__asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
	__asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
	__asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
	__asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
	__asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
	__asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
	__asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
	__asm__ __volatile__("movw %%ss, %%ax;" : "=a"(regs->xss));
	__asm__ __volatile__("movw %%cs, %%ax;" : "=a"(regs->xcs));
	__asm__ __volatile__("movw %%ds, %%ax;" : "=a"(regs->xds));
	__asm__ __volatile__("movw %%es, %%ax;" : "=a"(regs->xes));
	__asm__ __volatile__("pushfl; popl %0" : "=m"(regs->eflags));

	regs->eip = (unsigned long)current_text_addr();
}

/* CPU does not save ss and esp on stack if execution is already
 * running in kernel mode at the time of NMI occurrence. This code
 * fixes it.
 */
static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
{
	memcpy(newregs, oldregs, sizeof(*newregs));
	newregs->esp = (unsigned long)&(oldregs->esp);
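	/* The line above works because, for an in-kernel fault, the CPU does
	 * not push ss/esp: the esp slot of oldregs is never actually written,
	 * and its address is exactly where the interrupted stack pointer
	 * pointed.
	 */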
	__asm__ __volatile__("xorl %eax, %eax;");
	__asm__ __volatile__("movw %%ss, %%ax;" : "=a"(newregs->xss));
}

/* We may have saved_regs from where the error came from
 * or it is NULL if via a direct panic().
 */
static void crash_save_self(struct pt_regs *saved_regs)
{
	struct pt_regs regs;
	int cpu;

	cpu = smp_processor_id();

	if (saved_regs)
		crash_setup_regs(&regs, saved_regs);
	else
		crash_get_current_regs(&regs);
	crash_save_this_cpu(&regs, cpu);
}

#ifdef CONFIG_SMP
static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct pt_regs *regs, int cpu)
{
	struct pt_regs fixed_regs;

	/* Don't do anything if this handler is invoked on the crashing cpu.
	 * Otherwise the system will completely hang. The crashing cpu can get
	 * an NMI if the system was booted with the nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return 1;

	if (!user_mode(regs)) {
		crash_setup_regs(&fixed_regs, regs);
		regs = &fixed_regs;
	}
	crash_save_this_cpu(regs, cpu);
	disable_local_APIC();
	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;);

	return 1;
}

/*
 * By using the NMI code instead of a vector we just sneak thru the
 * word generator coming out with just what we want. AND it does
 * not matter if clustered_apic_mode is set or not.
 */
static void smp_send_nmi_allbutself(void)
{
	send_IPI_allbutself(APIC_DM_NMI);
}

static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	set_nmi_callback(crash_nmi_callback);
	/* Ensure the new callback function is set before sending
	 * out the NMI.
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
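
	/* If some cpus never checked in, carry on anyway; a wedged cpu must
	 * not keep the crash kernel from being started (its note buffer is
	 * simply left empty).
	 */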

	/* Leave the nmi callback set */
	disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	/* Make a note of the crashing cpu. Will be used in the NMI callback. */
	crashing_cpu = smp_processor_id();
	nmi_shootdown_cpus();
#if defined(CONFIG_X86_IO_APIC)
	disable_IO_APIC();
#endif
	crash_save_self(regs);
}