/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *
 *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
 */
13 #include <linux/linkage.h>
14 #include <linux/threads.h>
15 #include <linux/init.h>
17 #include <asm/segment.h>
20 #include <asm/cache.h>
/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages on setup, so define __START_KERNEL to
 * 0x100000 for this stage.
 */
31 /* %bx: 1 if coming from smp trampoline on secondary cpu */
35 * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
36 * paging disabled and the point of this file is to switch to 64bit
37 * long mode with a kernel mapping for kerneland to jump into the
38 * kernel virtual addresses.
39 * There is no stack until we set one up.
42 /* Initialize the %ds segment register */
43 movl $__KERNEL_DS,%eax
46 /* Load new GDT with the 64bit segments using 32bit descriptor */
47 lgdt pGDT32 - __START_KERNEL_map
49 /* If the CPU doesn't support CPUID this will double fault.
50 * Unfortunately it is hard to check for CPUID without a stack.
53 /* Check if extended functions are implemented */
54 movl $0x80000000, %eax
56 cmpl $0x80000000, %eax
58 /* Check if long mode is implemented */
65 * Prepare for entering 64bits mode
73 /* Setup early boot stage 4 level pagetables */
74 movl $(boot_level4_pgt - __START_KERNEL_map), %eax
77 /* Setup EFER (Extended Feature Enable Register) */
81 /* Enable Long Mode */
84 /* Make changes effective */
88 btsl $31, %eax /* Enable paging and in turn activate Long Mode */
89 btsl $0, %eax /* Enable protected mode */
90 /* Make changes effective */
93 * At this point we're in long mode but in 32bit compatibility mode
94 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
95 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
96 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
98 ljmp $__KERNEL_CS, $(startup_64 - __START_KERNEL_map)
104 /* We come here either from startup_32
105 * or directly from a 64bit bootloader.
106 * Since we may have come directly from a bootloader we
107 * reload the page tables here.
110 /* Enable PAE mode and PGE */
116 /* Setup early boot stage 4 level pagetables. */
117 movq $(boot_level4_pgt - __START_KERNEL_map), %rax
120 /* Check if nx is implemented */
121 movl $0x80000001, %eax
125 /* Setup EFER (Extended Feature Enable Register) */
129 /* Enable System Call */
130 btsl $_EFER_SCE, %eax
132 /* No Execute supported? */
137 /* Make changes effective */
141 #define CR0_PM 1 /* protected mode */
142 #define CR0_MP (1<<1)
143 #define CR0_ET (1<<4)
144 #define CR0_NE (1<<5)
145 #define CR0_WP (1<<16)
146 #define CR0_AM (1<<18)
147 #define CR0_PAGING (1<<31)
148 movl $CR0_PM|CR0_MP|CR0_ET|CR0_NE|CR0_WP|CR0_AM|CR0_PAGING,%eax
149 /* Make changes effective */
152 /* Setup a boot time stack */
153 movq init_rsp(%rip),%rsp
155 /* zero EFLAGS after setting rsp */
160 * We must switch to a new descriptor in kernel space for the GDT
161 * because soon the kernel won't have access anymore to the userspace
162 * addresses where we're currently running on. We have to do that here
163 * because in 32bit we couldn't load a 64bit linear address.
168 * Setup up a dummy PDA. this is just for some early bootup code
169 * that does in_interrupt()
171 movl $MSR_GS_BASE,%ecx
172 movq $empty_zero_page,%rax
177 /* set up data segments. actually 0 would do too */
178 movl $__KERNEL_DS,%eax
183 /* esi is pointer to real mode structure with interesting info.
187 /* Finally jump to run C code and to be on real kernel address
188 * Since we are running on identity-mapped space we have to jump
189 * to the full 64bit address , this is only possible as indirect
192 movq initial_code(%rip),%rax
195 /* SMP bootup changes these two */
198 .quad x86_64_start_kernel
201 .quad init_thread_union+THREAD_SIZE-8
203 ENTRY(early_idt_handler)
204 cmpl $2,early_recursion_flag(%rip)
206 incl early_recursion_flag(%rip)
208 movq 8(%rsp),%rsi # get rip
211 leaq early_idt_msg(%rip),%rdi
213 cmpl $2,early_recursion_flag(%rip)
218 early_recursion_flag:
222 .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
226 /* This isn't an x86-64 CPU so hang */
233 .word gdt_end-cpu_gdt_table
234 .long cpu_gdt_table-__START_KERNEL_map
238 .long startup_64-__START_KERNEL_map
245 ENTRY(init_level4_pgt)
246 /* This gets initialized in x86_64_start_kernel */
250 ENTRY(level3_ident_pgt)
251 .quad 0x0000000000004007 + __PHYSICAL_START
255 ENTRY(level3_kernel_pgt)
257 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
258 .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt */
262 ENTRY(level2_ident_pgt)
263 /* 40MB for bootup. */
264 .quad 0x0000000000000083
265 .quad 0x0000000000200083
266 .quad 0x0000000000400083
267 .quad 0x0000000000600083
268 .quad 0x0000000000800083
269 .quad 0x0000000000A00083
270 .quad 0x0000000000C00083
271 .quad 0x0000000000E00083
272 .quad 0x0000000001000083
273 .quad 0x0000000001200083
274 .quad 0x0000000001400083
275 .quad 0x0000000001600083
276 .quad 0x0000000001800083
277 .quad 0x0000000001A00083
278 .quad 0x0000000001C00083
279 .quad 0x0000000001E00083
280 .quad 0x0000000002000083
281 .quad 0x0000000002200083
282 .quad 0x0000000002400083
283 .quad 0x0000000002600083
284 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
285 .globl temp_boot_pmds
290 ENTRY(level2_kernel_pgt)
291 /* 40MB kernel mapping. The kernel code cannot be bigger than that.
292 When you change this change KERNEL_TEXT_SIZE in page.h too. */
293 /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
294 .quad 0x0000000000000183
295 .quad 0x0000000000200183
296 .quad 0x0000000000400183
297 .quad 0x0000000000600183
298 .quad 0x0000000000800183
299 .quad 0x0000000000A00183
300 .quad 0x0000000000C00183
301 .quad 0x0000000000E00183
302 .quad 0x0000000001000183
303 .quad 0x0000000001200183
304 .quad 0x0000000001400183
305 .quad 0x0000000001600183
306 .quad 0x0000000001800183
307 .quad 0x0000000001A00183
308 .quad 0x0000000001C00183
309 .quad 0x0000000001E00183
310 .quad 0x0000000002000183
311 .quad 0x0000000002200183
312 .quad 0x0000000002400183
313 .quad 0x0000000002600183
314 /* Module mapping starts here */
318 ENTRY(empty_zero_page)
321 ENTRY(empty_bad_page)
324 ENTRY(empty_bad_pte_table)
327 ENTRY(empty_bad_pmd_table)
330 ENTRY(level3_physmem_pgt)
331 .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
334 #ifdef CONFIG_ACPI_SLEEP
335 ENTRY(wakeup_level4_pgt)
336 .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
338 .quad 0x000000000000a007 + __PHYSICAL_START
340 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
341 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
344 #ifndef CONFIG_HOTPLUG_CPU
348 * This default setting generates an ident mapping at address 0x100000
349 * and a mapping for the kernel that precisely maps virtual address
350 * 0xffffffff80000000 to physical address 0x000000. (always using
351 * 2Mbyte large pages provided by PAE mode)
354 ENTRY(boot_level4_pgt)
355 .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
357 .quad 0x000000000000a007 + __PHYSICAL_START
359 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
360 .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
367 .word gdt_end-cpu_gdt_table
377 /* We need valid kernel segments for data and code in long mode too
378 * IRET will check the segment types kkeil 2000/10/28
379 * Also sysret mandates a special GDT layout
382 .align L1_CACHE_BYTES
384 /* The TLS descriptors are currently at a different place compared to i386.
385 Hopefully nobody expects them at a fixed place (Wine?) */
388 .quad 0x0000000000000000 /* NULL descriptor */
389 .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
390 .quad 0x00af9a000000ffff /* __KERNEL_CS */
391 .quad 0x00cf92000000ffff /* __KERNEL_DS */
392 .quad 0x00cffa000000ffff /* __USER32_CS */
393 .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
394 .quad 0x00affa000000ffff /* __USER_CS */
395 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
398 .quad 0,0,0 /* three TLS descriptors */
399 .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
400 /* base must be patched for real base address. */
402 /* asm/segment.h:GDT_ENTRIES must match this */
403 /* This should be a multiple of the cache line size */
404 /* GDTs of other CPUs: */
405 .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
407 .align L1_CACHE_BYTES