#include <linux/init.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <mach_apic.h>
#endif
#include <asm/pda.h>
#include "cpu.h"

DEFINE_PER_CPU(struct desc_struct, cpu_gdt[GDT_ENTRIES]) = {
	[GDT_ENTRY_KERNEL_CS] = { 0x0000ffff, 0x00cf9a00 },
	[GDT_ENTRY_KERNEL_DS] = { 0x0000ffff, 0x00cf9200 },
	[GDT_ENTRY_DEFAULT_USER_CS] = { 0x0000ffff, 0x00cffa00 },
	[GDT_ENTRY_DEFAULT_USER_DS] = { 0x0000ffff, 0x00cff200 },
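
	/*
	 * Descriptor layout refresher (standard x86, not specific to this
	 * file): the first word holds limit[15:0] and base[15:0]; the
	 * second holds base[23:16], the access byte, the flags plus
	 * limit[19:16], and base[31:24].  For example, the kernel code
	 * segment { 0x0000ffff, 0x00cf9a00 } decodes to base 0,
	 * limit 0xfffff with 4K granularity (a flat 4GB segment),
	 * 32-bit, present, DPL 0, readable code; the user variants
	 * differ only in DPL 3 (0xfa/0xf2 access bytes).
	 */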
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	[GDT_ENTRY_PNPBIOS_CS32] = { 0x0000ffff, 0x00409a00 },	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { 0x0000ffff, 0x00009a00 },	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_DS] = { 0x0000ffff, 0x00009200 },	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { 0x00000000, 0x00009200 },	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { 0x00000000, 0x00009200 },	/* 16-bit data */

	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	[GDT_ENTRY_APMBIOS_BASE] = { 0x0000ffff, 0x00409a00 },	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { 0x0000ffff, 0x00009a00 },	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+2] = { 0x0000ffff, 0x00409200 },	/* data */

	[GDT_ENTRY_ESPFIX_SS] = { 0x00000000, 0x00c09200 },
	[GDT_ENTRY_PDA] = { 0x00000000, 0x00c09200 },	/* set in setup_pda */
};
EXPORT_PER_CPU_SYMBOL_GPL(cpu_gdt);

DEFINE_PER_CPU(struct i386_pda, _cpu_pda);
EXPORT_PER_CPU_SYMBOL(_cpu_pda);

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_fxsr __cpuinitdata;
static int disable_x86_serial_nr __cpuinitdata = 1;
static int disable_x86_sep __cpuinitdata;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

extern int disable_pse;

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init	= default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
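
/*
 * Usage example (illustrative): the override value is interpreted in the
 * same unit as the detected L2 size, i.e. kilobytes, so booting with
 * "cachesize=512" makes display_cacheinfo() below record a 512K L2 cache
 * regardless of what CPUID reported.
 */
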
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;
	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;
	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage: strip the leading spaces in place */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
	return 1;
}

void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
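
/*
 * For reference: CPUID leaf 0x80000006 returns the L2 parameters in ECX,
 * with the size in KB in bits 31:16 and the line size in bytes in bits
 * 7:0 (the two fields display_cacheinfo() reads).  E.g. an ECX value of
 * 0x02008140 would be reported as a 512K L2 cache with 64-byte lines.
 */
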
/* Naming convention should be: <Name> [(<Codename>)] */
/* This table is only used if init_<vendor>() below doesn't set the model name; */
/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */

/* Look up CPU names by table lookup. */
static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
{
	struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
	printk(KERN_ERR "CPU: Your system may be unstable.\n");
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static int __init x86_fxsr_setup(char *s)
{
	/* Tell all the other CPUs to not use it... */
	disable_x86_fxsr = 1;

	/*
	 * ... and clear the bits early in the boot_cpu_data
	 * so that the bootup process doesn't try to do this
	 * itself already
	 */
	clear_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability);
	clear_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	disable_x86_sep = 1;
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard test to see if a specific flag in EFLAGS is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/* Save EFLAGS, try to toggle the flag, read EFLAGS back, restore */
	asm("pushfl; pushfl; popl %0; movl %0,%1; xorl %2,%0; pushl %0; "
	    "popfl; pushfl; popl %0; popfl"
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
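
/*
 * Background: X86_EFLAGS_ID is bit 21 of EFLAGS.  A CPU that implements
 * the CPUID instruction lets software toggle that bit; a 386 (and the few
 * 486 variants without CPUID) keeps it fixed, so flag_is_changeable_p()
 * doubles as the classic "do we have CPUID at all?" probe.  The same
 * trick with X86_EFLAGS_AC (bit 18) is used further down to tell a 386
 * from a 486.
 */
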
void __init cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
		if (cap0 & (1<<19))
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
	}
}
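
/*
 * Worked example of the family/model/stepping decode above (values are
 * illustrative): CPUID(1).EAX = 0x000006fb gives family (bits 11:8) = 6,
 * base model (bits 7:4) = 0xf, stepping (bits 3:0) = 0xb; since family >= 6
 * the extended model (bits 19:16, here 0) is folded in, so the CPU is
 * reported as family 6, model 15, stepping 11.  For 0x00000f29 the family
 * nibble is 0xf, so the extended family (bits 27:20, here 0) is added,
 * giving family 15, model 2, stepping 9.
 */
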
/* Do minimum CPU detection early.
   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
   The others are not touched to avoid unwanted side effects.

   WARNING: this function is only called on the BP.  Don't add code here
   that is supposed to run on all CPUs. */
static void __init early_cpu_detect(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;

	if (!have_cpuid_p())
		return;

	cpu_detect(c);
	get_cpu_vendor(c, 1);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, &c->cpuid_level,
		      (int *)&c->x86_vendor_id[0],
		      (int *)&c->x86_vendor_id[8],
		      (int *)&c->x86_vendor_id[4]);

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */

		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
			c->apicid = (ebx >> 24) & 0xFF;
#endif
			if (c->x86_capability[0] & (1<<19))
				c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
		} else {
			/* Have CPUID level 0 only - unheard of */
			c->x86 = 4;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}
	}

	early_intel_workaround(c);

#ifdef CONFIG_X86_HT
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
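
/*
 * Note on the capability array filled in above: word 0 is CPUID(1).EDX
 * (the classic Intel-defined flags), word 4 is CPUID(1).ECX, word 1 is
 * CPUID(0x80000001).EDX and word 6 is CPUID(0x80000001).ECX (the
 * AMD-defined extended flags).  The remaining words are filled in
 * elsewhere (vendor-specific and kernel-defined features).
 */
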
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_bit(X86_FEATURE_PN, c->x86_capability);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->cpuid_level = -1;		/* CPUID not detected */
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	c->x86_max_cores = 1;
	c->x86_clflush_size = 32;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	if (!have_cpuid_p()) {
		/* First of all, decide if this is a 486 or higher */
		/* It's a 486 if we can modify the AC flag */
		if (flag_is_changeable_p(X86_EFLAGS_AC))
			c->x86 = 4;
		else
			c->x86 = 3;
	}

	generic_identify(c);

	printk(KERN_DEBUG "CPU: After generic identify, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	if (this_cpu->c_identify) {
		this_cpu->c_identify(c);

		printk(KERN_DEBUG "CPU: After vendor identify, caps:");
		for (i = 0; i < NCAPINTS; i++)
			printk(" %08lx", c->x86_capability[i]);
		printk("\n");
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/* TSC disabled? */
	if (tsc_disable)
		clear_bit(X86_FEATURE_TSC, c->x86_capability);

	/* FXSR disabled? */
	if (disable_x86_fxsr) {
		clear_bit(X86_FEATURE_FXSR, c->x86_capability);
		clear_bit(X86_FEATURE_XMM, c->x86_capability);
	}

	/* SEP disabled? */
	if (disable_x86_sep)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	if (disable_pse)
		clear_bit(X86_FEATURE_PSE, c->x86_capability);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

	/* Now the feature flags better reflect actual CPU features! */

	printk(KERN_DEBUG "CPU: After all inits, caps:");
	for (i = 0; i < NCAPINTS; i++)
		printk(" %08lx", c->x86_capability[i]);
	printk("\n");

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_init(c);
	if (c == &boot_cpu_data)
		sysenter_setup();
	enable_sep_cpu();
	if (c == &boot_cpu_data)
		mtrr_bp_init();
	else
		mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
					"siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
					       ((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif
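
/*
 * Worked example for the topology math above (illustrative numbers,
 * assuming the default phys_pkg_id() that simply shifts the APIC id):
 * a package with 2 cores and 2 HT threads per core reports
 * smp_num_siblings = 4, so index_msb = 2 and phys_proc_id = apicid >> 2.
 * After dividing by x86_max_cores (2), siblings-per-core = 2, giving
 * index_msb = 1 and core_bits = 1, so cpu_core_id = (apicid >> 1) & 1.
 */
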
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM)
		vendor = this_cpu->c_vendor;
	else if (c->cpuid_level >= 0)
		vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	else
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
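
/*
 * Output sketch (illustrative): for a CPU with a CPUID model string this
 * appends something like "Intel(R) Pentium(R) 4 CPU 3.00GHz stepping 09"
 * to the line the caller started; a pre-CPUID part with no model name
 * falls back to the bare family, e.g. "486".
 */
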
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

/* This is hacky. :)
 * We're emulating future behavior.
 * In the future, the cpu-specific init functions will be called implicitly
 * via the magic of initcalls.
 * They will insert themselves into the cpu_devs structure.
 * Then, when cpu_init() is called, we can just iterate over that array.
 */

extern int intel_cpu_init(void);
extern int cyrix_init_cpu(void);
extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);

void __init early_cpu_init(void)
{
	intel_cpu_init();
	cyrix_init_cpu();
	nsc_init_cpu();
	amd_init_cpu();
	centaur_init_cpu();
	transmeta_init_cpu();
	rise_init_cpu();
	nexgen_init_cpu();
	umc_init_cpu();
	early_cpu_detect();

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* pse is not compatible with on-the-fly unmapping,
	 * disable it even if the cpus claim to support it.
	 */
	clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
	disable_pse = 1;
#endif
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->xfs = __KERNEL_PDA;
	return regs;
}

/* Initial PDA used by boot CPU */
struct i386_pda boot_pda = {
	._pda = &boot_pda,
	.cpu_number = 0,
	.pcurrent = &init_task,
};

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpu_test_and_set(cpu, cpu_initialized)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;) local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
	if (tsc_disable && cpu_has_tsc) {
		printk(KERN_NOTICE "Disabling TSC...\n");
		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		set_in_cr4(X86_CR4_TSD);
	}

	load_idt(&idt_descr);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	if (curr->mm)
		BUG();
	enter_lazy_tlb(&init_mm, curr);

	load_esp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	/* Clear %gs; the PDA lives in %fs on this kernel */
	asm volatile ("mov %0, %%gs" : : "r" (0));

	/* Clear all 6 debug registers: */
	set_debugreg(0, 0);
	set_debugreg(0, 1);
	set_debugreg(0, 2);
	set_debugreg(0, 3);
	set_debugreg(0, 6);
	set_debugreg(0, 7);

	/*
	 * Force FPU initialization:
	 */
	current_thread_info()->status = 0;
	clear_used_math();
	mxcsr_feature_mask_init();
}

#ifdef CONFIG_HOTPLUG_CPU
void __cpuinit cpu_uninit(void)
{
	int cpu = raw_smp_processor_id();
	cpu_clear(cpu, cpu_initialized);

	/* lazy TLB state */
	per_cpu(cpu_tlbstate, cpu).state = 0;
	per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
}
#endif