/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>
#include <asm/system.h>
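/* Read %cr3, the physical base address of the currently active
 * top-level page table (the page directory, or the PDPT under PAE).
 */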
static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm volatile("movl %%cr3,%0": "=r"(cr3));
	return cr3;
}
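/* The statically allocated page tables below must be page aligned.
 * L0/L1/L2_ATTR are the page-table entry flag bits used at each
 * paging level; the PAE top-level (PDPT) entry carries only the
 * present bit, since the remaining flag bits are reserved there.
 * LEVEL0_SIZE is the amount of memory covered by one level-0
 * (page table) entry, i.e. one 4K page.
 */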
#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define L2_ATTR (_PAGE_PRESENT)

#define LEVEL0_SIZE (1UL << 12UL)
#ifndef CONFIG_X86_PAE
#define LEVEL1_SIZE (1UL << 22UL)
static u32 pgtable_level1[1024] PAGE_ALIGNED;
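/* Two-level (non-PAE) case: pgtable_level2 is the live page directory
 * taken from %cr3 (each entry maps 4MB) and pgtable_level1 is a static
 * page table into which the single 4K identity mapping is installed.
 */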
static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index;
	u32 *pgtable_level2;

	/* Find the current page table */
	pgtable_level2 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
	level2_index = address / LEVEL1_SIZE;

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level2);
}
#else
#define LEVEL1_SIZE (1UL << 21UL)
#define LEVEL2_SIZE (1UL << 30UL)
static u64 pgtable_level1[512] PAGE_ALIGNED;
static u64 pgtable_level2[512] PAGE_ALIGNED;
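/* Three-level (PAE) case: pgtable_level3 is the live PDPT taken from
 * %cr3 (each entry maps 1GB), pgtable_level2 is a static page directory
 * (each entry maps 2MB) and pgtable_level1 is a static page table.
 * The PDPT entry is written with set_64bit() so the hardware never
 * observes a half-updated 64-bit entry.
 */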
static void identity_map_page(unsigned long address)
{
	unsigned long level1_index, level2_index, level3_index;
	u64 *pgtable_level3;

	/* Find the current page table */
	pgtable_level3 = __va(read_cr3());

	/* Find the indexes of the physical address to identity map */
	level1_index = (address % LEVEL1_SIZE)/LEVEL0_SIZE;
	level2_index = (address % LEVEL2_SIZE)/LEVEL1_SIZE;
	level3_index = address / LEVEL2_SIZE;

	/* Identity map the page table entry */
	pgtable_level1[level1_index] = address | L0_ATTR;
	pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
	set_64bit(&pgtable_level3[level3_index],
		  __pa(pgtable_level2) | L2_ATTR);

	/* Flush the tlb so the new mapping takes effect.
	 * Global tlb entries are not flushed but that is not an issue.
	 */
	load_cr3(pgtable_level3);
}
#endif
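/* set_idt()/set_gdt() build a 6-byte pseudo-descriptor on the stack
 * (16-bit limit followed by a 32-bit linear base address) and load it
 * with lidt/lgdt.
 */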
static void set_idt(void *newidt, __u16 limit)
{
	unsigned char curidt[6];

	/* ia32 supports unaligned loads & stores */
	(*(__u16 *)(curidt)) = limit;
	(*(__u32 *)(curidt +2)) = (unsigned long)(newidt);

	__asm__ __volatile__ (
		"lidt %0\n"
		: "=m" (curidt)
	);
}
static void set_gdt(void *newgdt, __u16 limit)
{
	unsigned char curgdt[6];

	/* ia32 supports unaligned loads & stores */
	(*(__u16 *)(curgdt)) = limit;
	(*(__u32 *)(curgdt +2)) = (unsigned long)(newgdt);

	__asm__ __volatile__ (
		"lgdt %0\n"
		: "=m" (curgdt)
	);
}
static void load_segments(void)
{
#define __STR(X) #X
#define STR(X) __STR(X)

	__asm__ __volatile__ (
		"\tljmp $"STR(__KERNEL_CS)",$1f\n"
		"\t1:\n"
		"\tmovl $"STR(__KERNEL_DS)",%eax\n"
		"\tmovl %eax,%ds\n"
		"\tmovl %eax,%es\n"
		"\tmovl %eax,%fs\n"
		"\tmovl %eax,%gs\n"
		"\tmovl %eax,%ss\n"
		::: "eax", "memory");
#undef STR
#undef __STR
}
typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
					unsigned long indirection_page,
					unsigned long reboot_code_buffer,
					unsigned long start_address,
					unsigned int has_pae) ATTRIB_NORET;
const extern unsigned char relocate_new_kernel[];
extern void relocate_new_kernel_end(void);
const extern unsigned int relocate_new_kernel_size;
/*
 * An architecture hook called to validate the
 * proposed image and prepare the control pages
 * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
 * have been allocated, but the segments have not yet
 * been copied into the kernel.
 *
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 *
 * Currently nothing is required.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}
/*
 * Undo anything leftover by machine_kexec_prepare
 * when an image is freed.
 */
void machine_kexec_cleanup(struct kimage *image)
{
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;

	relocate_new_kernel_t rnk;
	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	/* Compute some offsets */
	reboot_code_buffer = page_to_pfn(image->control_code_page)
								<< PAGE_SHIFT;
	page_list = image->head;
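	/* The relocation stub must keep executing at the same address
	 * once it begins switching paging off, so the control code page
	 * needs to be reachable with virtual == physical addresses.
	 */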
	/* Set up an identity mapping for the reboot_code_buffer */
	identity_map_page(reboot_code_buffer);
	/* copy it out */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
						relocate_new_kernel_size);
	/* The segment registers are funny things, they are
	 * automatically loaded from a table, in memory, wherever you
	 * set them to a specific selector, but this table is never
	 * accessed again unless you set the segment to a different
	 * selector.
	 *
	 * The more common model is a cache where the behind-the-scenes
	 * work is done, but the cached value is also dropped at
	 * arbitrary times.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);
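	/* Jump into the relocation stub: page_list is the head of the
	 * indirection page list describing where the new kernel's
	 * segments go, reboot_code_buffer lets the stub find its own
	 * identity-mapped address, image->start is the new kernel's
	 * entry point, and cpu_has_pae tells it how paging is set up.
	 */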
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer, image->start, cpu_has_pae);
}