/*
 * Architecture specific (i386) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
27 note_buf_t crash_notes[NR_CPUS];
28 /* This keeps a track of which one is crashing cpu. */
29 static int crashing_cpu;
31 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
36 note.n_namesz = strlen(name) + 1;
37 note.n_descsz = data_len;
39 memcpy(buf, ¬e, sizeof(note));
40 buf += (sizeof(note) +3)/4;
41 memcpy(buf, name, note.n_namesz);
42 buf += (note.n_namesz + 3)/4;
43 memcpy(buf, data, note.n_descsz);
44 buf += (note.n_descsz + 3)/4;
49 static void final_note(u32 *buf)
56 memcpy(buf, ¬e, sizeof(note));
59 static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
61 struct elf_prstatus prstatus;
64 if ((cpu < 0) || (cpu >= NR_CPUS))
67 /* Using ELF notes here is opportunistic.
68 * I need a well defined structure format
69 * for the data I pass, and I need tags
70 * on the data to indicate what information I have
71 * squirrelled away. ELF notes happen to provide
72 * all of that that no need to invent something new.
74 buf = &crash_notes[cpu][0];
75 memset(&prstatus, 0, sizeof(prstatus));
76 prstatus.pr_pid = current->pid;
77 elf_core_copy_regs(&prstatus.pr_reg, regs);
78 buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
83 static void crash_get_current_regs(struct pt_regs *regs)
85 __asm__ __volatile__("movl %%ebx,%0" : "=m"(regs->ebx));
86 __asm__ __volatile__("movl %%ecx,%0" : "=m"(regs->ecx));
87 __asm__ __volatile__("movl %%edx,%0" : "=m"(regs->edx));
88 __asm__ __volatile__("movl %%esi,%0" : "=m"(regs->esi));
89 __asm__ __volatile__("movl %%edi,%0" : "=m"(regs->edi));
90 __asm__ __volatile__("movl %%ebp,%0" : "=m"(regs->ebp));
91 __asm__ __volatile__("movl %%eax,%0" : "=m"(regs->eax));
92 __asm__ __volatile__("movl %%esp,%0" : "=m"(regs->esp));
93 __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(regs->xss));
94 __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(regs->xcs));
95 __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(regs->xds));
96 __asm__ __volatile__("movw %%es, %%ax;" :"=a"(regs->xes));
97 __asm__ __volatile__("pushfl; popl %0" :"=m"(regs->eflags));
99 regs->eip = (unsigned long)current_text_addr();
102 /* CPU does not save ss and esp on stack if execution is already
103 * running in kernel mode at the time of NMI occurrence. This code
106 static void crash_setup_regs(struct pt_regs *newregs, struct pt_regs *oldregs)
108 memcpy(newregs, oldregs, sizeof(*newregs));
109 newregs->esp = (unsigned long)&(oldregs->esp);
110 __asm__ __volatile__("xorl %eax, %eax;");
111 __asm__ __volatile__ ("movw %%ss, %%ax;" :"=a"(newregs->xss));
114 /* We may have saved_regs from where the error came from
115 * or it is NULL if via a direct panic().
117 static void crash_save_self(struct pt_regs *saved_regs)
122 cpu = smp_processor_id();
124 crash_setup_regs(®s, saved_regs);
126 crash_get_current_regs(®s);
127 crash_save_this_cpu(®s, cpu);
131 static atomic_t waiting_for_crash_ipi;
133 static int crash_nmi_callback(struct pt_regs *regs, int cpu)
135 struct pt_regs fixed_regs;
137 /* Don't do anything if this handler is invoked on crashing cpu.
138 * Otherwise, system will completely hang. Crashing cpu can get
139 * an NMI if system was initially booted with nmi_watchdog parameter.
141 if (cpu == crashing_cpu)
145 if (!user_mode(regs)) {
146 crash_setup_regs(&fixed_regs, regs);
149 crash_save_this_cpu(regs, cpu);
150 atomic_dec(&waiting_for_crash_ipi);
151 /* Assume hlt works */
159 * By using the NMI code instead of a vector we just sneak thru the
160 * word generator coming out with just what we want. AND it does
161 * not matter if clustered_apic_mode is set or not.
163 static void smp_send_nmi_allbutself(void)
165 send_IPI_allbutself(APIC_DM_NMI);
#ifdef CONFIG_SMP
/* Park every other online cpu via NMI so the crashing cpu can proceed
 * alone.  Waits up to one second for them to report in. */
static void nmi_shootdown_cpus(void)
{
	unsigned long msecs;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	set_nmi_callback(crash_nmi_callback);
	/* Ensure the new callback function is set before sending
	 * out the NMI
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
}
#else
static void nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif
197 void machine_crash_shutdown(struct pt_regs *regs)
199 /* This function is only called after the system
200 * has paniced or is otherwise in a critical state.
201 * The minimum amount of code to allow a kexec'd kernel
202 * to run successfully needs to happen here.
204 * In practice this means shooting down the other cpus in
207 /* The kernel is broken so disable interrupts */
210 /* Make a note of crashing cpu. Will be used in NMI callback.*/
211 crashing_cpu = smp_processor_id();
212 nmi_shootdown_cpus();
213 crash_save_self(regs);