/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/* Early DMI memory */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];
/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
struct resource data_resource = {
	.name = "Kernel data",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
struct resource code_resource = {
	.name = "Kernel code",
	.start = 0,
	.end = 0,
	.flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel. This option is passed
 * by the kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
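/*
 * Usage note (an assumption about typical deployment, not enforced here):
 * kexec-tools usually appends something like "elfcorehdr=0x12345000" to the
 * capture kernel's command line, which parse_early_param() then routes to
 * setup_elfcorehdr() above.
 */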
#ifndef CONFIG_NUMA
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#endif
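/*
 * Sizing note: the bootmem bitmap uses one bit per 4K page, so
 * bootmem_bootmap_pages(end_pfn) is roughly end_pfn/8 bytes rounded up to
 * whole pages; e.g. 4GB of RAM (2^20 pages) needs a 128KB bitmap.
 */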
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/*
 * copy_edd() - Copy the BIOS EDD information
 * from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
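/*
 * Layout note: the BIOS data area word at 0x40E holds the EBDA segment, so
 * the "<<= 4" above converts a real-mode segment to a physical address; the
 * first byte of the EBDA stores its own size in KB (read here as a word),
 * hence the "<<= 10".
 */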
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();
	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;
	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
#ifdef CONFIG_SMP
	/* setup to use the static apicid table during kernel startup */
	x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif
	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/* Parse SRAT to discover nodes. */
	acpi_numa_init();
#endif
#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif
	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));
	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif
#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif
#ifdef CONFIG_ACPI_SLEEP
	/* Reserve low memory region for sleep support. */
	acpi_reserve_bootmem();
#endif
	/* Find and reserve possible boot-time SMP configuration: */
	find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end) {
		reserve_bootmem_generic(crashk_res.start,
			crashk_res.end - crashk_res.start + 1);
	}
#endif
	/*
	 * set this early, so we don't allocate cpu0
	 * if the MADT list doesn't list the BSP first.
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/* Read APIC and some other early information from ACPI tables. */
	acpi_boot_init();
#endif
	/* get boot-time SMP configuration: */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();
	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}
	e820_setup_gap();

#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
	return 1;
}
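/*
 * Brand-string note: CPUID leaves 0x80000002-0x80000004 each return 16 bytes
 * of the 48-byte processor brand string in EAX:EBX:ECX:EDX, written above
 * straight into c->x86_model_id[] and then NUL-terminated.
 */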
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
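/*
 * Encoding note: CPUID 0x80000008 EAX[7:0] is the physical address width and
 * EAX[15:8] the linear (virtual) address width, e.g. 40/48 bits on K8 parts.
 */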
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores. Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to
		     the path for the previous case. */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
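/*
 * Worked example (hypothetical values): on a dual-core K8, CPUID 0x80000008
 * ECX[7:0] = 1, so x86_max_cores = 2 and bits = 1. An initial APIC id of 5
 * (0b101) then splits into core id 1 (low bit) and socket id 2 (remaining
 * upper bits, via phys_pkg_id(1)).
 */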
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000
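/*
 * Decoding example: a family-10h signature such as eax = 0x00100F22 has
 * (eax & CPUID_XFAM) == CPUID_XFAM_10H, while a K8 at revision F or later
 * has (eax & CPUID_XFAM) == CPUID_XFAM_K8 with
 * (eax & CPUID_XMOD) >= CPUID_XMOD_REV_F.
 */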
/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
			     level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_K8, &c->x86_capability);

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

	/* Family 10h doesn't support C-states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
#ifdef CONFIG_SMP
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
}
#endif
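/*
 * Worked example (hypothetical package): 2 cores x 2 threads report
 * ebx[23:16] = 4 logical CPUs. index_msb = 2 strips the thread+core bits to
 * form the physical id; after dividing by x86_max_cores one thread bit
 * remains, so the core id is (apicid >> 1) masked with one core bit.
 */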
/* find out the number of processor cores on the die */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;
	cpuid_count(4, 0, &eax, &t, &t, &t);
	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
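/*
 * CPUID leaf 4, subleaf 0 describes the first cache level: EAX[31:26] holds
 * the maximum core ids per package minus one, and EAX[4:0] == 0 (null cache
 * type) means the leaf is empty, hence the single-core fallback above.
 */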
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}

	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}

	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get the parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 tfms, misc;

		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
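/*
 * Worked example: tfms = 0x00100F42 gives base family 0xf, so the extended
 * family (tfms >> 20) & 0xff = 0x1 is added for family 0x10; the model
 * becomes (0x0 << 4) | 0x4 = 4 and the stepping (x86_mask) is 2.
 */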
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);

	/*
	 * Vendor-specific initialization. In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	select_idle_routine(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs. The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	int cpu = 0, i;

	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
		"altmovcr8", "abm", "sse4a",
		"misalignsse", "3dnowprefetch",
		"osvw", "ibs", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};
#ifdef CONFIG_SMP
	if (!cpu_online(c->cpu_index))
		return 0;
	cpu = c->cpu_index;
#endif
	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)cpu,
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)cpu);

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif
	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);
	for (i = 0; i < 32*NCAPINTS; i++)
		if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
			seq_printf(m, " %s", x86_cap_flags[i]);
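	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000; printed below as an
	 * integer part plus two decimals (the second expression is the same
	 * value scaled by 100, taken modulo 100).
	 */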
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	for (i = 0; i < 32; i++)
		if (c->x86_power & (1 << i)) {
			if (i < ARRAY_SIZE(x86_power_flags) &&
			    x86_power_flags[i])
				seq_printf(m, "%s%s",
					   x86_power_flags[i][0] ? " " : "",
					   x86_power_flags[i]);
			else
				seq_printf(m, " [%d]", i);
		}

	seq_printf(m, "\n\n");
	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0)	/* just in case, cpu 0 is not the first */
		*pos = first_cpu(cpu_possible_map);
	if ((*pos) < NR_CPUS && cpu_possible(*pos))
		return &cpu_data(*pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = next_cpu(*pos, cpu_possible_map);
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};