/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
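
/*
 * The helpers below build an identity-mapped (virtual == physical)
 * page table in the image's control pages.  The relocation stub runs
 * under this mapping once the current kernel's page tables are gone:
 * init_level2_page() fills one PMD page with 2MB large-page entries,
 * and the level3/level4 helpers build the PUD and PGD levels above it.
 */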
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}
static int init_level3_page(struct kimage *image, pud_t *level3p,
			unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}
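
/*
 * Top level: fill the PGD covering the whole virtual address space
 * (PTRS_PER_PGD entries), allocating one PUD page per populated slot
 * and recursing down until every physical page below last_addr is
 * identity mapped.
 */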
static int init_level4_page(struct kimage *image, pgd_t *level4p,
			unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}
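
/*
 * Build the complete identity map in the image's control pages.
 * start_pgtable is the physical address of the first control page
 * (set up in machine_kexec_prepare() below); mapping 0..end_pfn
 * covers all of physical RAM.
 */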
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;

	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}
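
/*
 * Reload the CPU's IDTR/GDTR from a descriptor built on the stack.
 * machine_kexec() calls these with a zero limit to deliberately
 * invalidate both tables just before jumping to the relocation stub.
 */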
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}
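
/*
 * Force every data segment register to __KERNEL_DS so the hidden
 * descriptor caches hold known-good values before the GDT is zapped.
 */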
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}
typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
					unsigned long control_code_buffer,
					unsigned long start_address,
					unsigned long pgtable) ATTRIB_NORET;
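
/*
 * The relocation stub itself is assembly (relocate_kernel.S), copied
 * into the control page at prepare time; these symbols give its start
 * and size.
 */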
extern const unsigned char relocate_new_kernel[];
extern const unsigned long relocate_new_kernel_size;
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable, control_code_buffer;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	control_code_buffer = start_pgtable + PAGE_SIZE;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* Place the code in the reboot code buffer */
	memcpy(__va(control_code_buffer), relocate_new_kernel,
	       relocate_new_kernel_size);

	return 0;
}
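
/*
 * Nothing to undo here: the control pages holding the page table and
 * the relocation code are freed by the generic kexec code when the
 * image itself is torn down.
 */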
void machine_kexec_cleanup(struct kimage *image)
{
	return;
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long control_code_buffer;
	unsigned long start_pgtable;
	relocate_new_kernel_t rnk;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Calculate the offsets */
	page_list = image->head;
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	control_code_buffer = start_pgtable + PAGE_SIZE;
	/* Set the low half of the page table to my identity mapped
	 * page table for kexec.  Leave the high half pointing at the
	 * kernel pages.  Don't bother to flush the global pages
	 * as that will happen when I fully switch to my identity mapped
	 * page table anyway.
	 */
	memcpy(__va(read_cr3()), __va(start_pgtable), PAGE_SIZE/2);
	__flush_tlb();
	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);
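
	/*
	 * The stub switches to the identity-mapped kexec page table,
	 * walks the indirection list in page_list to copy the new
	 * kernel's pages into place, then jumps to image->start; it
	 * never returns.
	 */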
	/* now call it */
	rnk = (relocate_new_kernel_t) control_code_buffer;
	(*rnk)(page_list, control_code_buffer, image->start, start_pgtable);
}