/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;
char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
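/*
 * Fixed legacy PC I/O ports, claimed up front so that device drivers do
 * not try to allocate them later: the two cascaded 8237 DMA controllers,
 * both 8259 interrupt controllers, the 8253/8254 timers, the keyboard
 * controller, the DMA page registers, and the x87 error-reporting ports.
 */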
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,                     /* filled in by setup_arch() */
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,                     /* filled in by setup_arch() */
        .end = 0,
        .flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel. This option will be passed
 * by the kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
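/*
 * Set up the boot-time allocator for the non-NUMA case. The bootmem
 * bitmap needs one bit per page, so a home for it is found by scanning
 * the e820 map for a free range large enough to hold it: for example,
 * 4GB of RAM is 1M pages, giving a 128KB bitmap (32 pages).
 */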
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
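/*
 * Carve out a region for a kdump capture kernel if the user asked for
 * one on the command line, e.g. "crashkernel=64M@16M" reserves 64MB
 * starting at physical address 16MB. The region is withheld from the
 * bootmem allocator and published through crashk_res.
 */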
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        free_mem =
                ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                               "for crashkernel (System RAM: %ldMB)\n",
                               (unsigned long)(crash_size >> 20),
                               (unsigned long)(crash_base >> 20),
                               (unsigned long)(free_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{
}
#endif
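/*
 * The word at 0x40E in the BIOS Data Area holds the real-mode segment
 * of the Extended BIOS Data Area; shifting it left by 4 turns the
 * segment into a physical address. The EBDA must be kept away from the
 * bootmem allocator, since the BIOS still uses it.
 */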
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
        /*
         * There is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E.
         */
        ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
        ebda_addr <<= 4;

        /* The first byte of the EBDA holds its size in kilobytes */
        ebda_size = *(unsigned short *)__va(ebda_addr);

        /* Round EBDA up to pages */
        if (ebda_size == 0)
                ebda_size = 1;
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}
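/*
 * setup_arch() is the architecture-dependent heart of early boot: it
 * copies what the boot loader left in boot_params, parses the e820
 * memory map and the command line, sets up the boot-time allocator,
 * reserves every region the kernel must not hand out (kernel image,
 * page 0, EBDA, initrd, crashkernel, SMP trampoline), and pulls in the
 * ACPI and MP tables before registering the standard I/O resources.
 */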
void __init setup_arch(char **cmdline_p)
{
        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();
        copy_edd();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        check_efer();

        discover_ebda();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
        dmi_scan_machine();

#ifdef CONFIG_SMP
        /* setup to use the static apicid table during kernel startup */
        x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
#endif

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);
#ifdef CONFIG_NUMA
        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        reserve_bootmem_generic(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start + ramdisk_size;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();
        paging_init();

#ifdef CONFIG_PCI
        early_quirks();
#endif

        /*
         * set this early, so we don't allocate cpu0
         * if the MADT list doesn't list the BSP first.
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();

        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        {
                unsigned i;

                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                        request_resource(&ioport_resource,
                                         &standard_io_resources[i]);
        }

        e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}
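/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * processor brand string in EAX:EBX:ECX:EDX, 48 characters in all,
 * which is why x86_model_id is filled as twelve 32-bit words below.
 */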
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 the L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                /* EAX[7:0] = physical address bits, EAX[15:8] = virtual */
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
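/*
 * No node known for this APIC ID: scan neighbouring APIC IDs downwards
 * and then upwards for the first one that maps to an online node, on
 * the theory that adjacent APIC IDs usually live in adjacent packages.
 */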
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
        int i;

        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish
 * the cores. Assumes the number of cores is a power of two.
 */
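/*
 * Example: with two cores per package, bits = 1, so APIC ID 5 (binary
 * 101) decomposes into core id 1 (the low bit) and package id 2 (the
 * remaining high bits).
 */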
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits;
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node = 0;
        unsigned apicid = hard_smp_processor_id();
#endif
        unsigned ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 * - The CPU is missing memory and no node was created.
                 *   In that case try picking one from a nearby CPU.
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.
                 *   Assume they are all increased by a constant offset,
                 *   but in the same order as the HT nodeids.
                 *   If that doesn't result in a usable node fall back to
                 *   the path for the previous case.
                 */
                int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
#define ENABLE_C1E_MASK			0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM			0x0ff00000
#define CPUID_XFAM_K8			0x00000000
#define CPUID_XFAM_10H			0x00100000
#define CPUID_XFAM_11H			0x00200000
#define CPUID_XMOD			0x000f0000
#define CPUID_XMOD_REV_F		0x00040000
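/*
 * The masks above pick apart the CPUID processor signature (leaf 1,
 * EAX): bits 20-27 hold the extended family (0 for K8, 1 for family
 * 10h, 2 for 11h) and bits 16-19 the extended model, which is how K8
 * revision F and later are told apart from earlier steppings.
 */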
/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi;
        u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
                /* rev F and later K8 fall through to the MSR check */
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable the TLB flush filter by setting HWCR.FFDIS on K8,
         * bit 6 of MSR C001_0015:
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
                             level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        level = get_model_name(c);
        if (!level) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish models here, but this is
                           only a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_K8, &c->x86_capability);

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

        /* Family 10h doesn't support C states in MWAIT so don't use it */
        if (c->x86 == 0x10 && !force_mwait)
                clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
}
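/*
 * Hyper-Threading detection: CPUID leaf 1 reports the number of logical
 * processors per physical package in EBX[23:16]. Dividing that by the
 * core count separates the sibling bits from the core bits of the APIC
 * ID, which yields the physical package and core IDs below.
 */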
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n",
                               smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
        }
#endif
}
/*
 * Find out the number of processor cores on the die.
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}
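/*
 * Leaf 4 encodes the maximum core id within the package in EAX[31:26],
 * so adding 1 gives the core count this code assumes: e.g. EAX[31:26]
 * = 1 on a dual-core die. A zero cache-type field (EAX[4:0]) means the
 * leaf is invalid and a single core is assumed.
 */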
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;

                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
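/*
 * Example: tfms = 0x06fb decodes to family 6, model 0xf, stepping 11;
 * since family >= 6 the extended model bits (tfms[19:16], here 0) are
 * merged in, giving model 15 - a Core 2 "Merom" class CPU.
 */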
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c != &boot_cpu_data)
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;
        int cpu = 0, i;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static const char *const x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
                NULL, NULL, NULL, NULL,
                "constant_tsc", "up", NULL, "arch_perfmon",
                "pebs", "bts", NULL, "sync_rdtsc",
                "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
                "altmovcr8", "abm", "sse4a",
                "misalignsse", "3dnowprefetch",
                "osvw", "ibs", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Auxiliary (Linux-defined) */
                "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static const char *const x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                "",     /* tsc invariant mapped to constant_tsc */
                /* nothing */
        };
        if (!cpu_online(c->cpu_index))
                return 0;
        cpu = c->cpu_index;

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)cpu,
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)cpu);

                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif

        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        for (i = 0; i < 32*NCAPINTS; i++)
                if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                        seq_printf(m, " %s", x86_cap_flags[i]);

        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++) {
                if (c->x86_power & (1 << i)) {
                        if (i < ARRAY_SIZE(x86_power_flags) &&
                            x86_power_flags[i])
                                seq_printf(m, "%s%s",
                                           x86_power_flags[i][0] ? " " : "",
                                           x86_power_flags[i]);
                        else
                                seq_printf(m, " [%d]", i);
                }
        }

        seq_printf(m, "\n\n");

        return 0;
}
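/*
 * seq_file iterator for /proc/cpuinfo: c_start() positions at the first
 * possible CPU for a given offset, c_next() advances through
 * cpu_possible_map, and c_stop() has nothing to clean up. show_cpuinfo()
 * is invoked once per position to print one CPU's record.
 */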
static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_possible_map);
        if ((*pos) < NR_CPUS && cpu_possible(*pos))
                return &cpu_data(*pos);
        return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = next_cpu(*pos, cpu_possible_map);
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};