/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>
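
/*
 * Static, page-aligned page tables handed to relocate_kernel below: they
 * are used to build the identity mapping that the control code runs under
 * while the new kernel image is being put in place.
 */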
#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
static u32 kexec_pgd[1024] PAGE_ALIGNED;
#ifdef CONFIG_X86_PAE
static u32 kexec_pmd0[1024] PAGE_ALIGNED;
static u32 kexec_pmd1[1024] PAGE_ALIGNED;
#endif
static u32 kexec_pte0[1024] PAGE_ALIGNED;
static u32 kexec_pte1[1024] PAGE_ALIGNED;

static void set_idt(void *newidt, __u16 limit)
{
	struct desc_ptr curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	load_gdt(&curgdt);
}
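
/*
 * Reload the segment registers from the current GDT so that their hidden,
 * cached descriptors stay valid after the GDT itself is invalidated later
 * in machine_kexec().
 */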
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed. The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have yet
 * to be copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	control_page = page_address(image->control_code_page);
	memcpy(control_page, relocate_kernel, PAGE_SIZE);
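
	/*
	 * Hand relocate_kernel both the physical and the virtual address
	 * of the control page and of each transition page table.
	 */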
	page_list[PA_CONTROL_PAGE] = __pa(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = __pa(kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
#ifdef CONFIG_X86_PAE
	page_list[PA_PMD_0] = __pa(kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PMD_1] = __pa(kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
#endif
	page_list[PA_PTE_0] = __pa(kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PTE_1] = __pa(kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory. At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start, cpu_has_pae);
}
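
/*
 * Export architecture-specific details (NUMA node data, whether PAE is
 * enabled) in the vmcoreinfo note so crash-dump tools can interpret the
 * memory image of this kernel.
 */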
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X86_PAE
	VMCOREINFO_CONFIG(X86_PAE);
#endif
}