/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/proto.h>
#include <asm/sections.h>
#include <asm/setup.h>
#ifndef CONFIG_DEBUG_BOOT_PARAMS
struct boot_params __initdata boot_params;
#else
struct boot_params boot_params;
#endif
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;

/* 256 IDT entries of 16 bytes each; the limit field holds size - 1 */
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask __read_mostly = ~0UL;
static int do_not_nx __cpuinitdata = 0;
/*
 * noexec=on|off
 * Control non-executable mappings for 64-bit processes.
 *
 * on	Enable (default)
 * off	Disable
 */
static int __init nonx_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
early_param("noexec", nonx_setup);
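/*
 * Usage sketch: with early_param() wired up above, a command line like
 *
 *	linux ... noexec=off
 *
 * reaches nonx_setup("off") during early parameter parsing, stripping
 * _PAGE_NX from __supported_pte_mask so that no page-table entry built
 * later carries the NX bit.
 */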
int force_personality32 = 0;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes
 * off	PROT_READ implies PROT_EXEC (default)
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);
/*
 * Copy data used in early init routines from the initial arrays to the
 * per-cpu data areas.  These arrays then become expendable and the
 * *_early_ptrs are zeroed, indicating that the static arrays are gone.
 */
void __init setup_percpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		if (per_cpu_offset(cpu)) {
#endif
			per_cpu(x86_cpu_to_apicid, cpu) =
						x86_cpu_to_apicid_init[cpu];
#ifdef CONFIG_NUMA
			per_cpu(x86_cpu_to_node_map, cpu) =
						x86_cpu_to_node_map_init[cpu];
#endif
#ifdef CONFIG_SMP
		} else
			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
			       cpu);
#endif
	}

	/* indicate the early static arrays are gone */
	x86_cpu_to_apicid_early_ptr = NULL;
#ifdef CONFIG_NUMA
	x86_cpu_to_node_map_early_ptr = NULL;
#endif
}
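/*
 * Consumer-side sketch (assuming the early_cpu_to_node()-style helpers
 * of this era): lookups test the early pointer first, so zeroing it
 * above switches all callers over to the per-cpu copies:
 *
 *	if (x86_cpu_to_node_map_early_ptr)
 *		return x86_cpu_to_node_map_early_ptr[cpu];
 *	return per_cpu(x86_cpu_to_node_map, cpu);
 */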
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per-cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy section for each CPU (we discard the original) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
	       size);
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(early_cpu_to_node(i))) {
			printk(KERN_NOTICE
			       "cpu with no node %d, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem_pages(size);
		} else {
			ptr = alloc_bootmem_pages_node(NODE_DATA(early_cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	/* setup percpu data maps early */
	setup_percpu_maps();
}
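/*
 * Address arithmetic: after the copy loop above, CPU i's instance of a
 * per-cpu variable lives at its link-time address plus data_offset,
 * which is what the per_cpu() accessor computes:
 *
 *	&per_cpu(var, i) == &var + cpu_pda(i)->data_offset
 *			 == ptr + (&var - __per_cpu_start)
 */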
void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
	/* Memory clobbers used to order PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack =
		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE - 64;
}
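/*
 * Access sketch: once MSR_GS_BASE points at the PDA, any field is one
 * %gs-relative access away; the read_pda()/write_pda() helpers expand
 * to roughly
 *
 *	movl %gs:offsetof(struct x8664_pda, cpunumber), %eax
 *
 * which is why pda_init() must run before anything touching the PDA.
 */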
char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));

extern asmlinkage void ignore_sysret(void);
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis.
	 * They both write to the same internal register: STAR allows
	 * setting the CS/SS selectors, but only a 32-bit entry point;
	 * LSTAR sets the 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
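/*
 * Bit layout of MSR_STAR as programmed above (architecturally defined):
 *
 *	63..48	SYSRET selector base  = __USER32_CS
 *		(64-bit SYSRET loads CS = base + 16 and SS = base + 8,
 *		 which is why the GDT keeps __USER32_CS, the user data
 *		 segment and __USER_CS consecutive)
 *	47..32	SYSCALL selector base = __KERNEL_CS (SS = base + 8)
 *	31..0	legacy-mode SYSCALL EIP, unused in 64-bit mode
 */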
void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx) {
		/* no NX in hardware, or noexec=off: drop NX everywhere */
		__supported_pte_mask &= ~_PAGE_NX;
	}
}
unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);
/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in pda_init().
 */
void __cpuinit cpu_init(void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0)
		pda_init(cpu);
	else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu)
		memcpy(get_cpu_gdt_table(cpu), cpu_gdt_table, GDT_SIZE);

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * set up and load the per-CPU TSS
	 */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->x86_tss.ist[v] = (unsigned long)estacks;
	}

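	/*
	 * The += above is not an off-by-one: stacks grow downwards, so
	 * each IST slot must record the *top* of its exception stack,
	 * one or more pages past the base that was allocated.
	 */
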
	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

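	/*
	 * Size check: IO_BITMAP_LONGS covers all 65536 I/O ports at one
	 * bit per port (65536 / 8 = 8192 bytes); tss_struct reserves one
	 * long beyond that, so the CPU's 8-bit overrun past the bitmap
	 * end also reads all-ones, i.e. access denied.
	 */
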
	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);
	/*
	 * Clear all 6 debug registers (dr4 and dr5 are reserved):
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);

	fpu_init();

	raw_local_save_flags(kernel_eflags);
}