/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <video/edid.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);

unsigned long mmu_cr4_features;
/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;
unsigned long saved_video_mode;

int force_mwait __cpuinitdata;

char dmi_alloc_data[DMI_MAX_DATA];

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

char __initdata command_line[COMMAND_LINE_SIZE];
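/*
 * Fixed legacy PC I/O port ranges (DMA and interrupt controllers, the PIT,
 * the keyboard controller, the FPU error port).  setup_arch() below requests
 * each of these against ioport_resource, so they show up in /proc/ioports
 * and are marked busy from the start.
 */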
struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_RAM,
};

struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_RAM,
};

struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_RAM,
};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of the ELF core header
 * stored by the crashed kernel. This option will be passed
 * by the kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
        char *end;

        elfcorehdr_addr = memparse(arg, &end);
        return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif
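/*
 * Non-NUMA bootmem bring-up: size the bootmem bitmap for all pages up to
 * end_pfn, find a free e820 slot to hold the bitmap, initialise the bootmem
 * allocator, hand every e820-usable page to it, and finally reserve the
 * bitmap itself so it cannot be allocated from.
 */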
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
        reserve_bootmem(bootmap, bootmap_size);
}
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void copy_edd(void)
{
}
#endif
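/*
 * Reserve memory for a crash ("capture") kernel.  The crashkernel= boot
 * parameter is typically given as crashkernel=<size>@<offset>, with both
 * values accepting K/M/G suffixes; e.g. "crashkernel=128M@16M" asks for
 * 128 MB starting at physical address 16 MB.  Note that this code insists
 * on an explicit base address: without one it only logs a message.
 */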
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        int ret;

        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base > 0) {
                        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                                        "for crashkernel (System RAM: %ldMB)\n",
                                        (unsigned long)(crash_size >> 20),
                                        (unsigned long)(crash_base >> 20),
                                        (unsigned long)(free_mem >> 20));
                        crashk_res.start = crash_base;
                        crashk_res.end = crash_base + crash_size - 1;
                        reserve_bootmem(crash_base, crash_size);
                } else
                        printk(KERN_INFO "crashkernel reservation failed - "
                                        "you have to specify a base address\n");
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif
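/*
 * Extended BIOS Data Area (EBDA) discovery.  The BIOS Data Area word at
 * physical address 0x40E holds the real-mode segment of the EBDA, so its
 * physical base is that value shifted left by 4; by convention the first
 * byte of the EBDA gives its size in KiB.  The region is reserved in
 * setup_arch() below so it is never handed out as ordinary RAM.
 */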
#define EBDA_ADDR_POINTER 0x40E

unsigned __initdata ebda_addr;
unsigned __initdata ebda_size;

static void discover_ebda(void)
{
        /*
         * There is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E.
         */
        ebda_addr = *(unsigned short *)__va(EBDA_ADDR_POINTER);
        ebda_addr <<= 4;

        ebda_size = *(unsigned short *)__va(ebda_addr);

        /* Round EBDA up to pages */
        ebda_size <<= 10;
        ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
        if (ebda_size > 64*1024)
                ebda_size = 64*1024;
}
void __init setup_arch(char **cmdline_p)
{
        unsigned i;

        printk(KERN_INFO "Command line: %s\n", boot_command_line);

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;
        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;

        early_identify_cpu(&boot_cpu_data);

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        finish_e820_parsing();

        e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();
        num_physpages = end_pfn;

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        /* setup to use the static apicid table during kernel startup */
        x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;

        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;

        /* Remove active ranges so rediscovery with NUMA-awareness happens */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif
        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        reserve_bootmem_generic(__pa_symbol(&_text),
                                __pa_symbol(&_end) - __pa_symbol(&_text));

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        if (ebda_addr)
                reserve_bootmem_generic(ebda_addr, ebda_size);

        /* reserve nodemap region */
        if (nodemap_addr)
                reserve_bootmem_generic(nodemap_addr, nodemap_size);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, 2*PAGE_SIZE);
#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#ifdef CONFIG_BLK_DEV_INITRD
        if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
                unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
                unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
                unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
                unsigned long end_of_mem = end_pfn << PAGE_SHIFT;

                if (ramdisk_end <= end_of_mem) {
                        reserve_bootmem_generic(ramdisk_image, ramdisk_size);
                        initrd_start = ramdisk_image + PAGE_OFFSET;
                        initrd_end = initrd_start + ramdisk_size;
                } else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               ramdisk_end, end_of_mem);
                        initrd_start = 0;
                }
        }
#endif
        reserve_crashkernel();

        /*
         * set this early, so we don't allocate cpu0
         * if the MADT list doesn't list the BSP first.
         * mpparse.c/MP_processor_info() allocates logical cpu numbers.
         */
        cpu_set(0, cpu_present_map);

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
        e820_reserve_resources();
        e820_mark_nosave_regions();

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
}
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}
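/*
 * Extended CPUID leaves used here: 0x80000002..4 return the 48-byte brand
 * string read by get_model_name() (16 bytes per leaf); 0x80000005 describes
 * the L1 caches (ECX = D-cache, EDX = I-cache; bits 31:24 size in KB,
 * bits 7:0 line size); 0x80000006 the L2 cache (ECX bits 31:16 size in KB,
 * bits 7:0 line size) and L2 TLB; 0x80000007 EDX advanced power management
 * flags; 0x80000008 EAX the physical (bits 7:0) and virtual (bits 15:8)
 * address widths.
 */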
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx >> 24) + (edx >> 24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}
static int nearby_node(int apicid)
{
        int i;
        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes the number of cores is a power of two.
 */
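/*
 * Worked example (assuming CPUID 0x80000008 ECX[15:12] reports one
 * core-id bit, i.e. a two-core package): for initial APIC id 5 the code
 * below yields cpu_core_id = 5 & 1 = 1 and, via phys_pkg_id(), a socket
 * id of 5 >> 1 = 2.
 */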
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int node;
        int cpu = smp_processor_id();
        unsigned apicid = hard_smp_processor_id();
        unsigned ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;
        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        c->phys_proc_id = phys_pkg_id(bits);

        node = c->phys_proc_id;
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                     In that case try picking one from a nearby CPU.
                   - The APIC IDs differ from the HyperTransport node IDs
                     which the K8 northbridge parsing fills in.
                     Assume they are all increased by a constant offset,
                     but in the same order as the HT nodeids.
                     If that doesn't result in a usable node fall back to
                     the path for the previous case. */
                int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);

                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000
#define CPUID_XMOD_REV_F	0x00040000
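/*
 * The masks above pick the extended-family (bits 27:20) and extended-model
 * (bits 19:16) fields out of the CPUID processor-signature leaf (leaf 1,
 * EAX), letting the check below tell pre-rev-F K8 parts apart from
 * family 10h/11h parts.
 */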
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
        u32 lo, hi;
        u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);

        switch (eax & CPUID_XFAM) {
        case CPUID_XFAM_K8:
                if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
                        break;
        case CPUID_XFAM_10H:
        case CPUID_XFAM_11H:
                rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
                if (lo & ENABLE_C1E_MASK)
                        return 1;
                break;
        default:
                /* err on the side of caution */
                return 1;
        }
        return 0;
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        unsigned level;
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }

        /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
           3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

        level = get_model_name(c);
        if (!level) {
                /* Should distinguish models here, but this is only
                   a fallback anyway. */
                strcpy(c->x86_model_id, "Hammer");
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008)
                amd_detect_cmp(c);

        if (c->extended_cpuid_level >= 0x80000006 &&
            (cpuid_edx(0x80000006) & 0xf000))
                num_cache_leaves = 4;
        else
                num_cache_leaves = 3;

        if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
                set_bit(X86_FEATURE_K8, &c->x86_capability);

        /* RDTSC can be speculated around */
        clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

        /* Family 10 doesn't support C states in MWAIT so don't use it */
        if (c->x86 == 0x10 && !force_mwait)
                clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
}
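/*
 * Hyper-Threading detection below: CPUID leaf 1 EBX bits 23:16 give the
 * number of logical processors per physical package.  Dividing that by
 * x86_max_cores yields the siblings per core; the initial APIC id is then
 * split so the upper bits select the package and the lower bits the core.
 * E.g. two cores with two threads each report 4 logical CPUs, so
 * index_msb = 2 and APIC id 6 maps to package 1, core 1.
 */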
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        if (!cpu_has(c, X86_FEATURE_HT))
                return;
        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                c->phys_proc_id = phys_pkg_id(index_msb);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                c->cpu_core_id = phys_pkg_id(index_msb) &
                                 ((1 << core_bits) - 1);
        }
out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
        }
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, t;

        if (c->cpuid_level < 4)
                return 1;

        cpuid_count(4, 0, &eax, &t, &t, &t);

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}
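/*
 * The core count above comes from CPUID leaf 4 (deterministic cache
 * parameters), sub-leaf 0: EAX bits 31:26 hold the maximum core id per
 * package minus one, so e.g. EAX = 0x0c000121 decodes to
 * (0x0c000121 >> 26) + 1 = 4 cores.
 */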
static void srat_detect_node(void)
{
        unsigned node;
        int cpu = smp_processor_id();
        int apicid = hard_smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = apicid_to_node[apicid];
        if (node == NUMA_NO_NODE)
                node = first_node(node_online_map);
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
}
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        unsigned n;

        init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
        }

        if (cpu_has_ds) {
                unsigned int l1, l2;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_bit(X86_FEATURE_BTS, c->x86_capability);
                if (!(l1 & (1<<12)))
                        set_bit(X86_FEATURE_PEBS, c->x86_capability);
        }

        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        if (c->x86 == 6)
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
        if (c->x86 == 15)
                set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        else
                clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}
struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};
/* Do some early cpuid on the boot CPU to get some parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);
        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 tfms, misc;

                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

        c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
}
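/*
 * Worked example of the family/model/stepping decode in
 * early_identify_cpu(): a signature of 0x000006fb gives base family 6,
 * model 0xf and stepping 0xb; since the family is >= 6 the extended-model
 * field (bits 19:16, here 0) is merged in, yielding family 6, model 15,
 * stepping 11.  The extended-family byte (bits 27:20) is only added when
 * the base family is 0xf.  The initial APIC id read at the end of that
 * function comes from CPUID leaf 1, EBX bits 31:24.
 */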
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set x86_cpuid_level here for now to not confuse. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        init_scattered_cpuid_features(c);

        c->apicid = phys_pkg_id(0);
        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }
        select_idle_routine(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }
#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c != &boot_cpu_data)
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}
void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}
/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;
        int cpu = 0, i;
        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static const char *const x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
                "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
                NULL, NULL, NULL, NULL,
                "constant_tsc", "up", NULL, "arch_perfmon",
                "pebs", "bts", NULL, "sync_rdtsc",
                "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
                "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
                "altmovcr8", "abm", "sse4a",
                "misalignsse", "3dnowprefetch",
                "osvw", "ibs", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Auxiliary (Linux-defined) */
                "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static const char *const x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                "100mhzsteps",
                "hwpstate",
                "",     /* tsc invariant mapped to constant_tsc */
        };
        if (!cpu_online(c->cpu_index))
                return 0;
        cpu = c->cpu_index;

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)cpu,
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");
        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)cpu);

                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

        if (smp_num_siblings * c->x86_max_cores > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
                           cpus_weight(per_cpu(cpu_core_map, cpu)));
                seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);
        for (i = 0; i < 32*NCAPINTS; i++)
                if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
                        seq_printf(m, " %s", x86_cap_flags[i]);
        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);
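        /*
         * BogoMIPS here is loops_per_jiffy scaled to a per-second figure:
         * lpj * HZ / 500000, i.e. two delay-loop iterations per "bogo"
         * instruction.  For example, lpj = 2,000,000 at HZ = 250 prints
         * "bogomips : 1000.00".
         */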
        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);
        seq_printf(m, "power management:");
        for (i = 0; i < 32; i++)
                if (c->x86_power & (1 << i)) {
                        if (i < ARRAY_SIZE(x86_power_flags) &&
                            x86_power_flags[i])
                                seq_printf(m, "%s%s",
                                           x86_power_flags[i][0] ? " " : "",
                                           x86_power_flags[i]);
                        else
                                seq_printf(m, " [%d]", i);
                }

        seq_printf(m, "\n\n");

        return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)  /* just in case, cpu 0 is not the first */
                *pos = first_cpu(cpu_possible_map);
        if ((*pos) < NR_CPUS && cpu_possible(*pos))
                return &cpu_data(*pos);
        return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = next_cpu(*pos, cpu_possible_map);
        return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};