/*
 * Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

/*
 * Early DMI memory
 */
int dmi_alloc_index;
char dmi_alloc_data[DMI_MAX_DATA];

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);

struct sys_desc_table_struct {
	unsigned short length;
	unsigned char table[0];
};

struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x6f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
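/*
 * The kernel text/data extents are not known until boot; setup_arch()
 * below fills in .start/.end from the _text/_etext/_edata link symbols.
 */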
struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_RAM,
};
struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel. This option will be passed
 * by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;
	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
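/*
 * Set up the boot-time allocator for the non-NUMA case: place the bootmem
 * bitmap in the first free e820 slot below end_pfn, hand all usable RAM
 * to the allocator, then reserve the bitmap itself.
 */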
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_register_active_regions(0, start_pfn, end_pfn);
	free_bootmem_with_active_regions(0, end_pfn);
	reserve_bootmem(bootmap, bootmap_size);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
	unsigned long long free_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

	ret = parse_crashkernel(boot_command_line, free_mem,
				&crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		if (crash_base > 0) {
			printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			       "for crashkernel (System RAM: %ldMB)\n",
			       (unsigned long)(crash_size >> 20),
			       (unsigned long)(crash_base >> 20),
			       (unsigned long)(free_mem >> 20));
			crashk_res.start = crash_base;
			crashk_res.end   = crash_base + crash_size - 1;
			reserve_bootmem(crash_base, crash_size);
		} else
			printk(KERN_INFO "crashkernel reservation failed - "
			       "you have to specify a base address\n");
	}
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
	/*
	 * There is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E.
	 */
	ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
	ebda_addr <<= 4;	/* real-mode segment to linear address */

	ebda_size = *(unsigned short *)__va(ebda_addr);

	/* Round EBDA up to pages */
	if (ebda_size == 0)
		ebda_size = 1;
	ebda_size <<= 10;	/* the size field is in KB */
	ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
	if (ebda_size > 64*1024)
		ebda_size = 64*1024;
}
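/*
 * Worked example (hypothetical values): if the BDA word at 0x40E reads
 * 0x9FC0, ebda_addr becomes 0x9FC00.  If the EBDA size byte pair reports
 * 1 (KB), ebda_size becomes 1024; round_up(1024 + 0xC00, 4096) = 4096,
 * so exactly one page ends up reserved.
 */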
void __init setup_arch(char **cmdline_p)
{
	printk(KERN_INFO "Command line: %s\n", boot_command_line);

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_phys(&_text);
	code_resource.end = virt_to_phys(&_etext)-1;
	data_resource.start = virt_to_phys(&_etext);
	data_resource.end = virt_to_phys(&_edata)-1;

	early_identify_cpu(&boot_cpu_data);

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	finish_e820_parsing();

	e820_register_active_regions(0, 0, -1UL);
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	end_pfn = e820_end_of_ram();
	num_physpages = end_pfn;

	check_efer();

	discover_ebda();

	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

	dmi_scan_machine();

#ifdef CONFIG_ACPI
	/*
	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
	 * Call this early for SRAT node setup.
	 */
	acpi_boot_table_init();
#endif

	/* How many end-of-memory variables you have, grandma! */
	max_low_pfn = end_pfn;
	max_pfn = end_pfn;
	high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

	/* Remove active ranges so rediscovery with NUMA-awareness happens */
	remove_all_active_ranges();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * Parse SRAT to discover nodes.
	 */
	acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init(0, end_pfn);
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

	/* reserve kernel */
	reserve_bootmem_generic(__pa_symbol(&_text),
				__pa_symbol(&_end) - __pa_symbol(&_text));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem_generic(0, PAGE_SIZE);

	/* reserve ebda region */
	if (ebda_addr)
		reserve_bootmem_generic(ebda_addr, ebda_size);

#ifdef CONFIG_NUMA
	/* reserve nodemap region */
	if (nodemap_addr)
		reserve_bootmem_generic(nodemap_addr, nodemap_size);
#endif

#ifdef CONFIG_SMP
	/* Reserve SMP trampoline */
	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

#ifdef CONFIG_BLK_DEV_INITRD
	if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
		unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
		unsigned long ramdisk_size  = boot_params.hdr.ramdisk_size;
		unsigned long ramdisk_end   = ramdisk_image + ramdisk_size;
		unsigned long end_of_mem    = end_pfn << PAGE_SHIFT;

		if (ramdisk_end <= end_of_mem) {
			reserve_bootmem_generic(ramdisk_image, ramdisk_size);
			initrd_start = ramdisk_image + PAGE_OFFSET;
			initrd_end = initrd_start+ramdisk_size;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;
		}
	}
#endif
	reserve_crashkernel();

	paging_init();

	/*
	 * set this early, so we don't allocate cpu0
	 * if MADT list doesn't list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif

	init_cpu_to_node();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();

	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();

	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
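/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * 48-character processor brand string.
 */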
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;	/* NUL-terminate the brand string */
	return 1;
}
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}
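/*
 * Fall back to the nearest node that is actually online: scan APIC ids
 * below, then above, the given one and return the first mapped node.
 */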
#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
	int i;

	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif
/*
 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		     In that case try picking one from a nearby CPU.
		   - The APIC IDs differ from the HyperTransport node IDs
		     which the K8 northbridge parsing fills in.
		     Assume they are all increased by a constant offset,
		     but in the same order as the HT nodeids.
		     If that doesn't result in a usable node fall back to the
		     path for the previous case. */
		int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
		if (ht_nodeid >= 0 &&
		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000
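/*
 * CPUID_XFAM/CPUID_XMOD mask the extended-family and extended-model
 * fields out of the leaf-1 processor signature checked below.
 */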
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
		/* fall through for rev F and later */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	if (c->extended_cpuid_level >= 0x80000006 &&
	    (cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_K8, &c->x86_capability);

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}
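/*
 * CPUID leaf 1 EBX bits 23:16 report the number of logical processors
 * per physical package; detect_ht() splits that into HT siblings and
 * cores using the core count discovered above.
 */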
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) &
			((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}
#endif
}
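/*
 * CPUID leaf 4 (deterministic cache parameters) encodes, in EAX bits
 * 31:26, the maximum core index per package; adding 1 yields the core
 * count used below.
 */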
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}
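	/*
	 * In IA32_MISC_ENABLE a clear bit 11 means BTS (branch trace store)
	 * is available, and a clear bit 12 means PEBS is available.
	 */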
	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}
	n = c->extended_cpuid_level;
	if (n >= 0x80000008) {
		unsigned eax = cpuid_eax(0x80000008);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		/* CPUID workaround for Intel 0F34 CPU */
		if (c->x86_vendor == X86_VENDOR_INTEL &&
		    c->x86 == 0xF && c->x86_model == 0x3 &&
		    c->x86_mask == 0x4)
			c->x86_phys_bits = 36;
	}
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 15)
		set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	else
		clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
	c->x86_max_cores = intel_num_cpu_cores(c);

	srat_detect_node();
}
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;

	if (!strcmp(v, "AuthenticAMD"))
		c->x86_vendor = X86_VENDOR_AMD;
	else if (!strcmp(v, "GenuineIntel"))
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms;
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
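	/*
	 * tfms layout: bits 3:0 stepping, 7:4 model, 11:8 family,
	 * 19:16 extended model, 27:20 extended family.
	 */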
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;

		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

#ifdef CONFIG_SMP
	c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl;

	early_identify_cpu(c);
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	init_scattered_cpuid_features(c);

	c->apicid = phys_pkg_id(0);
	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch (c->x86_vendor) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;

	case X86_VENDOR_INTEL:
		init_intel(c);
		break;

	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}
	select_idle_routine(c);
	detect_ht(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_X86_MCE
	mcheck_init(c);
#endif
	if (c != &boot_cpu_data)
		mtrr_ap_init();
#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
	else
		printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_x86 *c = v;
	/*
	 * These flag bits must match the definitions in <asm/cpufeature.h>.
	 * NULL means this bit is undefined or reserved; either way it doesn't
	 * have meaning as far as Linux is concerned. Note that it's important
	 * to realize there is a difference between this table and CPUID -- if
	 * applications want to get the raw CPUID data, they should access
	 * /dev/cpu/<cpu_nr>/cpuid instead.
	 */
	static const char *const x86_cap_flags[] = {
		/* Intel-defined */
		"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
		"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
		"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
		"fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

		/* AMD-defined */
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
		NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
		"3dnowext", "3dnow",

		/* Transmeta-defined */
		"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Other (Linux-defined) */
		"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
		NULL, NULL, NULL, NULL,
		"constant_tsc", "up", NULL, "arch_perfmon",
		"pebs", "bts", NULL, "sync_rdtsc",
		"rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Intel-defined (#2) */
		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
		"tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
		NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* VIA/Cyrix/Centaur-defined */
		NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
		"ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* AMD-defined (#2) */
		"lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
		"altmovcr8", "abm", "sse4a",
		"misalignsse", "3dnowprefetch",
		"osvw", "ibs", NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

		/* Auxiliary (Linux-defined) */
		"ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	};
	static const char *const x86_power_flags[] = {
		"ts",	/* temperature sensor */
		"fid",	/* frequency id control */
		"vid",	/* voltage id control */
		"ttp",	/* thermal trip */
		"tm",
		"stc",
		"100mhzsteps",
		"hwpstate",
		"",	/* tsc invariant mapped to constant_tsc */
		/* nothing */
	};
#ifdef CONFIG_SMP
	if (!cpu_online(c-cpu_data))
		return 0;
#endif
	seq_printf(m, "processor\t: %u\n"
		   "vendor_id\t: %s\n"
		   "cpu family\t: %d\n"
		   "model\t\t: %d\n"
		   "model name\t: %s\n",
		   (unsigned)(c-cpu_data),
		   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
		   c->x86,
		   (int)c->x86_model,
		   c->x86_model_id[0] ? c->x86_model_id : "unknown");
	if (c->x86_mask || c->cpuid_level >= 0)
		seq_printf(m, "stepping\t: %d\n", c->x86_mask);
	else
		seq_printf(m, "stepping\t: unknown\n");
	if (cpu_has(c, X86_FEATURE_TSC)) {
		unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));

		if (!freq)
			freq = cpu_khz;
		seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
			   freq / 1000, (freq % 1000));
	}
	/* Cache size */
	if (c->x86_cache_size >= 0)
		seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
	if (smp_num_siblings * c->x86_max_cores > 1) {
		int cpu = c - cpu_data;
		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
		seq_printf(m, "siblings\t: %d\n",
			   cpus_weight(per_cpu(cpu_core_map, cpu)));
		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
	}
#endif
	seq_printf(m,
		   "fpu\t\t: yes\n"
		   "fpu_exception\t: yes\n"
		   "cpuid level\t: %d\n"
		   "wp\t\t: yes\n"
		   "flags\t\t:",
		   c->cpuid_level);
	{
		int i;
		for (i = 0; i < 32*NCAPINTS; i++)
			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
				seq_printf(m, " %s", x86_cap_flags[i]);
	}
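	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000; the integer part and
	 * two fractional digits are printed separately to avoid floating
	 * point in the kernel.
	 */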
	seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
		   c->loops_per_jiffy/(500000/HZ),
		   (c->loops_per_jiffy/(5000/HZ)) % 100);
	if (c->x86_tlbsize > 0)
		seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
	seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
	seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

	seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
		   c->x86_phys_bits, c->x86_virt_bits);
	seq_printf(m, "power management:");
	{
		unsigned i;
		for (i = 0; i < 32; i++)
			if (c->x86_power & (1 << i)) {
				if (i < ARRAY_SIZE(x86_power_flags) &&
				    x86_power_flags[i])
					seq_printf(m, "%s%s",
						   x86_power_flags[i][0] ? " " : "",
						   x86_power_flags[i]);
				else
					seq_printf(m, " [%d]", i);
			}
	}

	seq_printf(m, "\n\n");

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
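/*
 * The seq_file iterator walks cpu_data[0..NR_CPUS-1]; c_next simply
 * advances the position and re-validates it through c_start().
 */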
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};