/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>
#include <asm/cacheflush.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
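/*
 * Transition page tables for the switch to the new kernel.  They are
 * statically allocated and page aligned up front because machine_kexec()
 * runs past the point of no return and must not allocate memory; the
 * relocation code uses them to identity map the control page.
 */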
static u32 kexec_pgd[1024] PAGE_ALIGNED;
#ifdef CONFIG_X86_PAE
static u32 kexec_pmd0[1024] PAGE_ALIGNED;
static u32 kexec_pmd1[1024] PAGE_ALIGNED;
#endif
static u32 kexec_pte0[1024] PAGE_ALIGNED;
static u32 kexec_pte1[1024] PAGE_ALIGNED;

static void set_idt(void *newidt, __u16 limit)
{
	struct desc_ptr curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}
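
/*
 * Force every segment register to be reloaded from the current GDT so
 * that the CPU's hidden descriptor caches hold valid entries before the
 * GDT itself is replaced with an empty one below.
 */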
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_PAGE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * - Make control page executable.
 */
int machine_kexec_prepare(struct kimage *image)
{
	if (nx_enabled)
		set_pages_x(image->control_code_page, 1);
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
	if (nx_enabled)
		set_pages_nx(image->control_code_page, 1);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;
	asmlinkage unsigned long
		(*relocate_kernel_ptr)(unsigned long indirection_page,
				       unsigned long control_page,
				       unsigned long start_address,
				       unsigned int has_pae,
				       unsigned int preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context)
		save_processor_state();
#endif

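	/*
	 * Save the ftrace state and switch tracing off for the
	 * transition; it is restored below once relocate_kernel_ptr()
	 * returns on the CONFIG_KEXEC_JUMP/preserve_context path.
	 */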
	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel.  kexec/kdump
		 * paths already have calls to disable_IO_APIC() in one
		 * form or other.  The kexec jump path also needs one.
		 */
		disable_IO_APIC();
#endif
	}
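
	/*
	 * Copy the relocation stub to the control page.  The copy, not
	 * the original kernel text, is what runs the final transition,
	 * so it keeps executing after the old kernel's mappings and
	 * descriptor tables are gone.
	 */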
	control_page = page_address(image->control_code_page);
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
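
	/*
	 * page_list hands relocate_kernel both the physical and the
	 * virtual address of each object it needs: the stub starts out
	 * with the old page tables still live and switches to an
	 * identity mapping partway through, so it needs both views.
	 */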
	relocate_kernel_ptr = control_page;
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_PGD] = __pa(kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
#ifdef CONFIG_X86_PAE
	page_list[PA_PMD_0] = __pa(kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PMD_1] = __pa(kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
#endif
	page_list[PA_PTE_0] = __pa(kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PTE_1] = __pa(kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the descriptor
	 * table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel_ptr((unsigned long)image->head,
					   (unsigned long)page_list,
					   image->start, cpu_has_pae,
					   image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

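/*
 * Record architecture-specific symbols and config options in the
 * vmcoreinfo note so crash dump tools can decode the resulting vmcore.
 */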
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X86_PAE
	VMCOREINFO_CONFIG(X86_PAE);
#endif
}