1 #include <linux/init.h>
3 #include <asm/processor.h>
4 #include <asm/ptrace.h>
5 #include <asm/topology.h>
6 #include <asm/numa_64.h>
/*
 * early_init_intel - early-boot feature fixups for Intel CPUs.
 *
 * NOTE(review): this excerpt is garbled -- each code line carries a fused
 * original-file line number, and several original lines (e.g. the opening
 * brace and original lines 11 and 15) are missing from view. Comments
 * below describe only the statements that are visible.
 */
10 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
/*
 * Family 0xF model >= 0x03 and Family 6 model >= 0x0e are flagged as
 * having a constant-rate TSC (X86_FEATURE_CONSTANT_TSC).
 */
12 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
13 (c->x86 == 0x6 && c->x86_model >= 0x0e))
14 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
/*
 * Marks SYSENTER as usable from 32-bit compat mode; any guard on the
 * missing original line 15 is not visible here -- TODO confirm.
 */
16 set_cpu_cap(c, X86_FEATURE_SYSENTER32);
/*
 * NOTE(review): excerpt is missing original lines 19, 21, 23-25, 27-28 and
 * 30-31 -- the block-comment delimiters, opening brace, local declarations
 * for "eax"/"t", and the early-return body implied by the cpuid_level
 * check are not visible. TODO confirm against the full file.
 */
20 * find out the number of processor cores on the die
22 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
/* CPUID leaf 4 must exist to query per-package core topology. */
26 if (c->cpuid_level < 4)
/* Leaf 4, subleaf 0; only EAX is used, the other outputs go to a dummy. */
29 cpuid_count(4, 0, &eax, &t, &t, &t);
/* Core count is derived from EAX bits 31:26, plus one. */
32 return ((eax >> 26) + 1);
/*
 * srat_detect_node - bind the running CPU to a NUMA node via its APIC ID.
 *
 * NOTE(review): excerpt is missing original lines 38-40, 43, 45 and 50 --
 * presumably the opening brace, the declaration of "node", the tail of the
 * comment started on visible line 44, and any #ifdef CONFIG_NUMA guards.
 * TODO confirm against the full file.
 */
37 static void __cpuinit srat_detect_node(void)
41 int cpu = smp_processor_id();
42 int apicid = hard_smp_processor_id();
44 /* Don't do the funky fallback heuristics the AMD version employs
/*
 * Look up the firmware-provided apicid -> node mapping; if the table has
 * no valid/online entry, fall back to the first online node.
 */
46 node = apicid_to_node[apicid];
47 if (node == NUMA_NO_NODE || !node_online(node))
48 node = first_node(node_online_map);
/* Record the cpu -> node binding, then log it. */
49 numa_set_node(cpu, node);
51 printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
/*
 * init_intel - per-CPU Intel setup: cache info, perf/debug feature caps,
 * address widths, cache alignment and core count.
 *
 * NOTE(review): excerpt is missing original lines 56-59, 66-69, 71, 73,
 * 75-80, 86-88, 90 and 94-97 -- local declarations (l1/l2, n, ...), the
 * conditions guarding the BTS/PEBS set_cpu_cap calls, and whatever code
 * sits between the visible fragments are not shown. Comments below
 * describe only what is visible.
 */
55 static void __cpuinit init_intel(struct cpuinfo_x86 *c)
60 init_intel_cacheinfo(c);
/*
 * CPUID leaf 0xA: low byte of EAX must be nonzero (a perfmon version
 * exists) and bits 15:8 (counter count) must exceed 1 before advertising
 * architectural perfmon support.
 */
61 if (c->cpuid_level > 9) {
62 unsigned eax = cpuid_eax(10);
63 /* Check for version and the number of counters */
64 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
65 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
/*
 * MSR_IA32_MISC_ENABLE is read into l1/l2 (declared on a missing line);
 * the bit tests guarding the two set_cpu_cap calls below are on missing
 * original lines 71 and 73 -- TODO confirm which bits are checked.
 */
70 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
72 set_cpu_cap(c, X86_FEATURE_BTS);
74 set_cpu_cap(c, X86_FEATURE_PEBS);
/*
 * CPUID 0x80000008: EAX bits 7:0 give physical address width,
 * bits 15:8 give virtual address width.
 */
81 n = c->extended_cpuid_level;
82 if (n >= 0x80000008) {
83 unsigned eax = cpuid_eax(0x80000008);
84 c->x86_virt_bits = (eax >> 8) & 0xff;
85 c->x86_phys_bits = eax & 0xff;
/*
 * Cache alignment is set to twice the CLFLUSH line size -- presumably to
 * account for adjacent-line prefetch; TODO confirm rationale.
 */
89 c->x86_cache_alignment = c->x86_clflush_size * 2;
91 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
92 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
93 c->x86_max_cores = intel_num_cpu_cores(c);
/*
 * Vendor driver descriptor for "GenuineIntel" CPUs, wiring the early and
 * main init hooks above into the common x86 CPU setup path via
 * cpu_vendor_dev_register().
 *
 * NOTE(review): excerpt is missing original lines 99 and 103 -- likely a
 * .c_vendor field and the closing "};" -- TODO confirm.
 */
98 static struct cpu_dev intel_cpu_dev __cpuinitdata = {
100 .c_ident = { "GenuineIntel" },
101 .c_early_init = early_init_intel,
102 .c_init = init_intel,
104 cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);