/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#else
#define GET_CR2_INTO_RCX movq %cr2, %rcx
#endif
/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

	.text
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86_64/boot/compressed/head.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	/* Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
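	/* The %rip-relative leaq yields the physical address _text is
	 * actually running at (we are still on an identity mapping), while
	 * $_text - __START_KERNEL_map is the physical address it was
	 * compiled to run at.  For example, a kernel compiled for physical
	 * 0x200000 but loaded 16MB higher leaves %rbp = 0x1000000, the
	 * delta added to every physical pointer below.
	 */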
	/* Is the address not 2M aligned? */
	movq	%rbp, %rax
	andl	$~PMD_PAGE_MASK, %eax
	testl	%eax, %eax
	jnz	bad_address

	/* Is the address too large? */
	leaq	_text(%rip), %rdx
	movq	$PGDIR_SIZE, %rax
	cmpq	%rax, %rdx
	jae	bad_address
	/* Fixup the physical addresses in the page table */
	addq	%rbp, init_level4_pgt + 0(%rip)
	addq	%rbp, init_level4_pgt + (258*8)(%rip)
	addq	%rbp, init_level4_pgt + (511*8)(%rip)

	addq	%rbp, level3_ident_pgt + 0(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
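	/* The slot numbers mirror the virtual addresses the entries decode
	 * to: pgd slot 0 is the boot identity map, slot 258 should be the
	 * direct mapping at __PAGE_OFFSET (0xffff810000000000 >> 39 == 258),
	 * and slot 511 the kernel map at 0xffffffff80000000.  Only populated
	 * entries, which hold physical pointers, need the delta added.
	 */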
	/* Add an Identity mapping if I am above 1G */
	leaq	_text(%rip), %rdi
	andq	$PMD_PAGE_MASK, %rdi

	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andq	$(PTRS_PER_PUD - 1), %rax
	jz	ident_complete

	leaq	(level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
	leaq	level3_ident_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)

	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rax
	andq	$(PTRS_PER_PMD - 1), %rax
	leaq	__PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx
	leaq	level2_spare_pgt(%rip), %rbx
	movq	%rdx, 0(%rbx, %rax, 8)
ident_complete:
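	/* A worked instance of the arithmetic above: for a kernel loaded at
	 * physical 0x80000000 (2G) the PUD index is
	 * (0x80000000 >> PUD_SHIFT) & 511 = 2, so the spare pmd page gets
	 * hooked into level3_ident_pgt slot 2, and the 2MB mapping for the
	 * load address is then written into level2_spare_pgt through the
	 * scaled-index store movq %rdx, 0(%rbx, %rax, 8).
	 */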
	/* Fixup the kernel text+data virtual addresses. Note that we might
	 * write invalid pmds when the kernel is relocated; cleanup_highmap()
	 * fixes this up along with the mappings beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testq	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b
	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)

#ifdef CONFIG_X86_TRAMPOLINE
	addq	%rbp, trampoline_level4_pgt + 0(%rip)
	addq	%rbp, trampoline_level4_pgt + (511*8)(%rip)
#endif
	/* Due to ENTRY(), sometimes the empty space gets filled with
	 * zeros. Better take a jmp than relying on empty space being
	 * filled with 0x90 (nop)
	 */
	jmp	secondary_startup_64
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 1,
	 * and someone has loaded a mapped page table.
	 *
	 * %esi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Enable PAE mode and PGE */
	xorq	%rax, %rax
	btsq	$5, %rax		/* CR4.PAE */
	btsq	$7, %rax		/* CR4.PGE */
	movq	%rax, %cr4

	/* Setup early boot stage 4 level pagetables. */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	jmp	*%rax
1:
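	/* %cr3 wants a physical address: init_level4_pgt is a link-time
	 * virtual symbol, so subtracting __START_KERNEL_map gives its
	 * compile-time physical address and adding phys_base corrects for
	 * relocation.  The indirect jmp through $1f (a link-time virtual
	 * address) is what should actually move %rip out of the identity
	 * mapping and into the __START_KERNEL_map alias of this code.
	 */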
	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
1:	wrmsr				/* Make changes effective */
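	/* CPUID leaf 0x80000001 returns the extended feature flags in %edx;
	 * bit 20 there is NX (No Execute), which is why the btl tests bit 20
	 * of the saved %edi before setting EFER.NX.
	 */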
	/* Setup cr0 */
#define CR0_PM		1	/* protected mode */
#define CR0_MP		(1<<1)	/* monitor coprocessor */
#define CR0_ET		(1<<4)	/* extension type */
#define CR0_NE		(1<<5)	/* numeric error */
#define CR0_WP		(1<<16)	/* write protect */
#define CR0_AM		(1<<18)	/* alignment mask */
#define CR0_PAGING	(1<<31)	/* paging enabled */
	movl	$CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
	/* Make changes effective */
	movq	%rax, %cr0
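	/* The OR of the flags above works out to 0x80050033:
	 * 1 + 2 + 0x10 + 0x20 + 0x10000 + 0x40000 + 0x80000000.
	 */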
	/* Setup a boot time stack */
	movq	init_rsp(%rip),%rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we are currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	cpu_gdt_descr(%rip)
	/* set up data segments; actually 0 would do too */
	movl	$__KERNEL_DS,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs
	/*
	 * Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt()
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
	movq	%rax,%rdx
	shrq	$32,%rdx
	wrmsr
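	/* wrmsr writes the 64bit value %edx:%eax into the MSR selected by
	 * %ecx, so the shrq $32 puts the high half of the empty_zero_page
	 * address into %rdx while %eax keeps the low half.
	 */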
	/* esi is pointer to real mode structure with interesting info.
	   pass it to C */
	movl	%esi, %edi

	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump. In addition we need to ensure %cs is set so we make this
	 * a far return.
	 */
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
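	# lretq pops the new %rip and then the new %cs, so the three pushes
	# above fake a far-call frame: execution resumes at initial_code with
	# %cs = __KERNEL_CS, and the remaining 0 on the stack is what stops a
	# stack unwinder from walking any further.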
	/* SMP bootup changes these two */
	.align	8
	ENTRY(initial_code)
	.quad	x86_64_start_kernel

	ENTRY(init_rsp)
	.quad	init_thread_union+THREAD_SIZE-8

bad_address:
	jmp	bad_address
	.section ".init.text","ax"
#ifdef CONFIG_EARLY_PRINTK
	.globl early_idt_handlers
early_idt_handlers:
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	movl	$i, %esi
	jmp	early_idt_handler
	i = i + 1
	.endr
#endif
ENTRY(early_idt_handler)
#ifdef CONFIG_EARLY_PRINTK
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	incl	early_recursion_flag(%rip)
	GET_CR2_INTO_RCX
	movq	%rcx,%r9
	xorl	%r8d,%r8d		# zero for error code
	movl	%esi,%ecx		# get vector number
	# Test %ecx against mask of vectors that push error code.
	cmpl	$31,%ecx
	ja	0f
	movl	$1,%eax
	salq	%cl,%rax
	testl	$0x27d00,%eax
	je	0f
	popq	%r8			# get error code
0:	movq	0(%rsp),%rcx		# get ip
	movq	8(%rsp),%rdx		# get cs
	xorl	%eax,%eax
	leaq	early_idt_msg(%rip),%rdi
	call	early_printk
	cmpl	$2,early_recursion_flag(%rip)
	jz	1f
	call	dump_stack
#ifdef CONFIG_KALLSYMS
	leaq	early_idt_ripmsg(%rip),%rdi
	movq	8(%rsp),%rsi		# get rip again
	call	__print_symbol
#endif
#endif /* EARLY_PRINTK */
1:	hlt
	jmp	1b
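	# The 0x27d00 mask tested above encodes the exception vectors that
	# push an error code: bits 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF)
	# and 17 (#AC); for those the error code is popped into %r8.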
#ifdef CONFIG_EARLY_PRINTK
early_recursion_flag:
	.long	0

early_idt_msg:
	.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"

early_idt_ripmsg:
	.asciz "RIP %s\n"
#endif /* CONFIG_EARLY_PRINTK */
#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
ENTRY(name)

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)		\
	i = 0 ;					\
	.rept (COUNT) ;				\
	.quad	(START) + (i << 21) + (PERM) ;	\
	i = i + 1 ;				\
	.endr
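/* Each PMDS iteration emits one 8-byte pmd entry mapping a 2MB large page
 * (the i << 21 step), so e.g. PMDS(0, perm, 512) fills a whole pmd page
 * and maps 512 * 2MB = 1G starting at physical 0.
 */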
	/*
	 * This default setting generates an ident mapping at address 0x100000
	 * and a mapping for the kernel that precisely maps virtual address
	 * 0xffffffff80000000 to physical address 0x000000. (always using
	 * 2Mbyte large pages provided by PAE mode)
	 */
NEXT_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	257,8,0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	252,8,0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
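	/* Slot arithmetic for the page above: entry 0, 257 empty slots,
	 * entry 258, 252 empty slots, entry 511 --
	 * 1 + 257 + 1 + 252 + 1 = 512 pgd entries in total.
	 */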
NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0
NEXT_PAGE(level3_kernel_pgt)
	.fill	510,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD)
NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
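	/* With KERNEL_IMAGE_SIZE at its default of 512MB this emits
	 * 512MB / 2MB = 256 pmd entries, so only the first half of the
	 * page is populated; the remainder stays zero.
	 */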
NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0

#undef PMDS
#undef NEXT_PAGE

	.data
	.align	16
	.globl	cpu_gdt_descr
cpu_gdt_descr:
	.word	gdt_end-cpu_gdt_table-1
gdt:
	.quad	cpu_gdt_table
ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */

	.section .data.page_aligned, "aw"
	.align	PAGE_SIZE

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
ENTRY(cpu_gdt_table)
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x00cf9b000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9b000000ffff	/* __KERNEL_CS */
	.quad	0x00cf93000000ffff	/* __KERNEL_DS */
	.quad	0x00cffb000000ffff	/* __USER32_CS */
	.quad	0x00cff3000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affb000000ffff	/* __USER_CS */
	.quad	0x0			/* unused */
	.quad	0,0			/* TSS */
	.quad	0,0			/* LDT */
	.quad	0,0,0			/* three TLS descriptors */
	.quad	0x0000f40000000000	/* node/CPU stored in limit */
gdt_end:
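	/* Decoding these descriptors: byte 5 is the access byte
	 * (0x9b = present DPL0 code, 0x93 = DPL0 data, 0xfb/0xf3 the DPL3
	 * variants) and the high nibble of byte 6 holds the flags, where
	 * 0xa (G+L) marks the 64bit code segments and 0xc (G+D) the 32bit
	 * ones; base and limit are 0 and 0xfffff pages throughout.
	 */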
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs are now dynamically allocated */

	/* zero the remaining page */
	.fill	PAGE_SIZE / 8 - GDT_ENTRIES,8,0
	.section .bss, "aw", @nobits
	.align	L1_CACHE_BYTES
ENTRY(idt_table)
	.skip	256 * 16

	.section .bss.page_aligned, "aw", @nobits
	.align	PAGE_SIZE
ENTRY(empty_zero_page)
	.skip	PAGE_SIZE