/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L2_ATTR (_PAGE_PRESENT)

#define LEVEL0_SIZE (1UL << 12UL)
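
/* LEVEL0_SIZE is the 4 KiB span of a single page.  Without PAE one
 * page-directory entry maps 4 MiB (LEVEL1_SIZE below); with PAE a
 * page-directory entry maps 2 MiB and a page-directory-pointer entry
 * maps 1 GiB.
 */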

#ifndef CONFIG_X86_PAE
#define LEVEL1_SIZE (1UL << 22UL)
static u32 pgtable_level1[1024] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index;
	u32 *pgtable_level2;

	/* Find the current page table */
	pgtable_level2 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
	level2_index = address / LEVEL1_SIZE;
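
	/* For example, for address 0x00800000 (8 MiB): level1_index is
	 * (0x00800000 % 4 MiB) / 4 KiB = 0 and level2_index is
	 * 0x00800000 / 4 MiB = 2.
	 */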

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level2);
}

#else
#define LEVEL1_SIZE (1UL << 21UL)
#define LEVEL2_SIZE (1UL << 30UL)
static u64 pgtable_level1[512] PAGE_ALIGNED;
static u64 pgtable_level2[512] PAGE_ALIGNED;

static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index, level3_index;
	u64 *pgtable_level3;

	/* Find the current page table */
	pgtable_level3 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
	level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
	level3_index = address / LEVEL2_SIZE;
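
	/* For example, for address 0x00800000 (8 MiB): level1_index is
	 * (0x00800000 % 2 MiB) / 4 KiB = 0, level2_index is
	 * (0x00800000 % 1 GiB) / 2 MiB = 4, and level3_index is
	 * 0x00800000 / 1 GiB = 0.
	 */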

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
	set_64bit(&pgtable_level3[level3_index],
		  __pa(pgtable_level2) | L2_ATTR);
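
	/* PAE page-table entries are 64 bits wide but this is a 32-bit
	 * kernel, so set_64bit() is used to update the top-level entry
	 * with a single atomic store rather than two 32-bit writes.
	 */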

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level3);
}
#endif

static void set_idt(void *newidt, __u16 limit)
{
	struct Xgt_desc_struct curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtl %0\n"
		: : "m" (curidt)
	);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct Xgt_desc_struct curgdt;

	/* ia32 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtl %0\n"
		: : "m" (curgdt)
	);
}

static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%%eax\n"
		"\tmovl %%eax,%%ds\n"
		"\tmovl %%eax,%%es\n"
		"\tmovl %%eax,%%fs\n"
		"\tmovl %%eax,%%gs\n"
		"\tmovl %%eax,%%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}

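/* Note that %cs cannot be written with a mov; the ljmp through
 * __KERNEL_CS above is what forces a fresh load of the code-segment
 * descriptor before the gdt is invalidated.
 */
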
typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
					unsigned long indirection_page,
					unsigned long reboot_code_buffer,
					unsigned long start_address,
					unsigned int has_pae) ATTRIB_NORET;
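
/* The function-pointer type above must mirror the entry interface of
 * the assembly relocation stub; the stub never returns, hence
 * ATTRIB_NORET.
 */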

extern const unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
extern const unsigned int relocate_new_kernel_size;
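
/* These symbols are expected to be provided by the architecture's
 * assembly relocation code (relocate_kernel.S): the relocation stub
 * itself, a marker for its end, and its size in bytes.
 */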

/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have yet
 * to be copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * at shutdown.
 *
 * Currently nothing is required.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;

	relocate_new_kernel_t rnk;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Compute some offsets */
	reboot_code_buffer = page_to_pfn(image->control_code_page)
								<< PAGE_SHIFT;
	page_list = image->head;
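
	/* image->head is the physical address of the first kexec
	 * indirection page; its low bits carry the IND_* flags that
	 * the relocation stub uses to walk the page list.
	 */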

	/* Set up an identity mapping for the reboot_code_buffer */
	identity_map_page(reboot_code_buffer);
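
	/* The identity mapping matters because the relocation stub
	 * runs from this buffer while it rewrites the page tables and
	 * eventually turns paging off: the stub's virtual and physical
	 * addresses must agree for execution to continue.
	 */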

	/* copy it out */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
						relocate_new_kernel_size);

	/* The segment registers are funny things: they are
	 * automatically loaded from a table in memory whenever you
	 * set them to a specific selector, but this table is never
	 * accessed again unless you set the segment to a different
	 * selector.
	 *
	 * The more common model is a cache where the behind-the-scenes
	 * work is done, but which is also dropped at arbitrary times.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);
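
	/* With interrupts disabled the empty idt is never consulted;
	 * any exception taken from here on would triple fault, which
	 * is acceptable this close to the reboot.
	 */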

	/* now call it */
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
}