/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif

/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
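
/*
 * Explanatory note (not in the original): with the default layout,
 * __START_KERNEL_map is 0xffffffff80000000, so L4_START_KERNEL =
 * pgd_index(0xffffffff80000000) = 511 and L3_START_KERNEL = 510,
 * matching the slot arithmetic in the comments on init_level4_pgt
 * and level3_kernel_pgt below.
 */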

	.text
	.section .text.head
	.code64
	.globl startup_64
startup_64:

	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at we first fixup the physical addresses in our page
	 * tables and then reload them.
	 */

	/* Compute the delta between the address I am compiled to run at and the
	 * address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
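
	/*
	 * Worked example (illustrative numbers, not from the original
	 * source): $_text - __START_KERNEL_map evaluates to the physical
	 * address the kernel was linked for, e.g. 0x200000 with the
	 * default CONFIG_PHYSICAL_START.  If the bootloader actually
	 * placed us at 0x1a00000, leaq _text(%rip) yields 0x1a00000 and
	 * %rbp ends up holding the relocation delta 0x1800000, which is
	 * added to every physical pointer in the static page tables below.
	 */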

	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address

	/* Fixup the physical addresses in the page table
	 */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
	addq	%rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
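
	/*
	 * Explanatory note: the indices above mirror the static layout at
	 * the end of this file: entries 510/511 of level3_kernel_pgt hold
	 * level2_kernel_pgt and level2_fixmap_pgt, and entry 506 of
	 * level2_fixmap_pgt holds level1_fixmap_pgt.  Only entries that
	 * contain a physical pointer to another page table need the delta
	 * added.
	 */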

	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:
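
	/*
	 * Recap (explanatory note): the default level2_ident_pgt only
	 * covers the first 1G.  If _text landed above that, the two stores
	 * above first hook the spare PMD page into level3_ident_pgt at the
	 * 1G slot covering _text, then plant a single executable 2M large
	 * page entry for the 2M-aligned block containing _text.
	 */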

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)
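
	/*
	 * Explanatory note: phys_base starts out as 0 (see its definition
	 * near the end of this file); from here on it holds the relocation
	 * delta, which the C-side __pa()/__va() translations rely on.
	 */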

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif

	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop)
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Enable PAE mode and PGE */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %eax
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3
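
	/*
	 * Explanatory note: init_level4_pgt - __START_KERNEL_map is the
	 * link-time physical address of the pgd; adding phys_base (the
	 * relocation delta fixed up in startup_64) yields its runtime
	 * physical address, which is what %cr3 requires.
	 */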

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
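
	/*
	 * Explanatory note: CPUID leaf 0x80000001 reports NX support in
	 * %edx bit 20, which is what the btl above tests.  _EFER_SCE
	 * enables the SYSCALL/SYSRET instructions; _EFER_NX turns on
	 * no-execute page protection.
	 */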

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	stack_start(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

#ifdef CONFIG_SMP
	/*
	 * Fix up static pointers that need __per_cpu_load added.  The assembler
	 * is unable to do this directly.  This is only needed for the boot cpu.
	 * These values are set up with the correct base addresses by C code for
	 * secondary cpus.
	 */
	movq	initial_gs(%rip), %rax
	cmpl	$0, per_cpu__cpu_number(%rax)
	jne	1f
	addq	%rax, early_gdt_descr_base(%rip)
1:
#endif
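
	/*
	 * Explanatory note: early_gdt_descr_base statically holds the
	 * link-time (per-cpu section relative) address of gdt_page.  On
	 * the boot cpu (cpu_number == 0) the base from initial_gs is added
	 * so the lgdt below sees a real linear address; secondary cpus get
	 * a correctly based descriptor from C before they arrive here.
	 */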

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr

	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on real kernel address
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip), %rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
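
	/*
	 * Stack picture just before the lretq (explanatory note):
	 *
	 *	(%rsp)   -> initial_code	new %rip
	 *	8(%rsp)  -> __KERNEL_CS		new %cs
	 *	16(%rsp) -> 0			fake return address
	 *
	 * lretq pops %rip and then %cs, so execution continues in
	 * x86_64_start_kernel() with the kernel code segment loaded.
	 */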

	/* SMP bootup changes these two */
	__REFDATA
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel
	ENTRY(initial_gs)
#ifdef CONFIG_SMP
	.quad	__per_cpu_load
#else
	.quad	PER_CPU_VAR(irq_stack_union)
#endif
	__FINITDATA

	ENTRY(stack_start)
	.quad	init_thread_union+THREAD_SIZE-8
	.word	0

bad_address:
	jmp	bad_address

	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl	$i, %esi
	jmp	early_idt_handler
	i = i + 1
	.endr
#endif
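
	/*
	 * Explanatory note: the .rept block above stamps out one tiny stub
	 * per exception vector.  Each stub only records its vector number
	 * in %esi and branches to the shared early_idt_handler, which is
	 * what the early IDT entries installed from C point at.
	 */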

ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq	%rcx,%r9
	xorl	%r8d,%r8d	# zero for error code
	movl	%esi,%ecx	# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl	$31,%ecx
	ja	0f
	movl	$1,%eax
	salq	%cl,%rax
	testl	$0x27d00,%eax
	je	0f
	popq	%r8		# get error code
0:	movq	0(%rsp),%rcx	# get ip
	movq	8(%rsp),%rdx	# get cs
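	# Explanatory note (not in the original): 0x27d00 above has bits
	# 8,10,11,12,13,14,17 set -- the vectors (#DF, #TS, #NP, #SS, #GP,
	# #PF, #AC) that push an error code, hence the conditional pop into
	# %r8.  The registers now line up with early_idt_msg's arguments:
	# %rsi = vector, %rdx:%rcx = cs:rip, %r8 = error code, %r9 = cr2.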
	xorl	%eax,%eax	# no vector registers in use for the varargs call
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	0(%rsp),%rsi	# get rip again
	call	__print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp	1b

#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
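
/*
 * For example (an illustrative expansion, not in the original source),
 * PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 3) emits three consecutive
 * 2MB mappings:
 *	.quad 0x000000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x200000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 *	.quad 0x400000 + __PAGE_KERNEL_IDENT_LARGE_EXEC
 */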

	.data
	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
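
/*
 * Explanatory note: the .org directives zero-pad up to the slot index,
 * so entry 0 and entry L4_PAGE_OFFSET both point at level3_ident_pgt
 * (the identity and direct mappings), while entry L4_START_KERNEL (511)
 * points at level3_kernel_pgt for the kernel text mapping.
 */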

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	/* Since I easily can, map the first 2G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	per_cpu__gdt_page
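
/*
 * Explanatory note: an lgdt operand is a 2-byte limit followed by an
 * 8-byte linear base.  The base field starts out as the per-cpu-section
 * address of gdt_page and is rebased for the boot cpu in
 * secondary_startup_64 before the lgdt executes.
 */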

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000

#include "../../x86/xen/xen-head.S"

	.section .bss, "aw", @nobits
	.align L1_CACHE_BYTES
ENTRY(idt_table)
	.skip IDT_ENTRIES * 16

	.section .bss.page_aligned, "aw", @nobits
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.skip PAGE_SIZE