 * Routines to identify caches on Intel CPUs.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;		/* in KB (K uops for trace caches) */
};
/* All the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }
};
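/*
 * Illustration: a descriptor byte of 0x2c returned by cpuid(2) is looked up
 * in the table above and recorded as a 32 KB L1 data cache. Only the level
 * and total size are kept; associativity and line size appear only in the
 * comments, since the legacy descriptor path in init_intel_cacheinfo()
 * needs nothing more.
 */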
enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};
static unsigned short num_cache_leaves;
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;

	cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
	cache_eax.full = eax;
	if (cache_eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error? */
	this_leaf->eax.full = eax;
	this_leaf->ebx.full = ebx;
	this_leaf->ecx.full = ecx;
	this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
		(this_leaf->ebx.split.coherency_line_size + 1) *
		(this_leaf->ebx.split.physical_line_partition + 1) *
		(this_leaf->ebx.split.ways_of_associativity + 1);
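	/*
	 * Worked example: a 32 KB, 8-way cache with 64-byte lines and no
	 * sectoring has 64 sets, so CPUID leaf 4 reports number_of_sets = 63,
	 * coherency_line_size = 63, physical_line_partition = 0 and
	 * ways_of_associativity = 7 (each field is encoded minus one); the
	 * product above is then 64 * 64 * 1 * 8 = 32768 bytes.
	 */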
/* will only be called once; __init is safe here */
static int __init find_num_cache_leaves(void)
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;
	int i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
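	/*
	 * Example: a core with split L1I/L1D, a unified L2 and a unified L3
	 * has four valid leaves (indices 0-3); index 4 reports
	 * CACHE_TYPE_NULL, so the loop stops and the function returns 4.
	 */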
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}
		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			if (cpuid4_cache_lookup(i, &this_leaf) < 0)
				continue;
			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				break;
			}
		}
	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++)
				if (regs[j] < 0)
					regs[j] = 0;
			/* Byte 0 is the level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
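	/* Report the largest cache level present: L3, else L2, else both L1s. */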
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)	(&((cpuid4_info[x])[y]))
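/*
 * e.g. CPUID4_INFO_IDX(2, 1) points at cache leaf 1 of CPU 2 (often the L1
 * instruction cache, though the leaf order is whatever CPUID reports).
 */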
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i]) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void free_cache_attributes(unsigned int cpu)
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
static int __cpuinit detect_cache_attributes(unsigned int cpu)
	struct _cpuid4_info *this_leaf;
	int j;
	int retval;
	cpumask_t oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kmalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(cpuid4_info[cpu] == NULL))
		return -ENOMEM;
	memset(cpuid4_info[cpu], 0,
	       sizeof(struct _cpuid4_info) * num_cache_leaves);
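	/*
	 * cpuid4_cache_lookup() must run on the CPU being probed, so pin this
	 * task there for the duration and restore the old mask afterwards.
	 */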
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	retval = 0;
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0))
			break;
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval)
		free_cache_attributes(cpu);
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
static struct kobject *cache_kobject[NR_CPUS];

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)	(&((index_kobject[x])[y]))
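/*
 * Together these back the sysfs tree
 * /sys/devices/system/cpu/cpuX/cache/indexY/; each indexY directory exposes
 * the attributes defined below for one cache leaf, e.g.
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/size
 *	2048K
 *
 * (values illustrative).
 */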
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}
show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
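/*
 * The first invocation above expands (roughly) to:
 *
 *	static ssize_t show_level(struct _cpuid4_info *this_leaf, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n",
 *			       (unsigned long)this_leaf->eax.split.level + 0);
 *	}
 *
 * The "+ 1" variants undo the minus-one encoding CPUID uses for line size,
 * partitions, ways and sets.
 */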
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
	char mask_str[NR_CPUS];

	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
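/*
 * The mask comes out as a hex bitmap; for example, with CPUs 0 and 1 sharing
 * this cache (and NR_CPUS = 32) the file reads "00000003".
 */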
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};
#define define_one_ro(_name)				\
static struct _cache_attr _name =			\
	__ATTR(_name, 0444, show_##_name, NULL)
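/*
 * define_one_ro(level), for instance, yields a read-only (mode 0444)
 * attribute named "level" wired to show_level(), with no store method.
 */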
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

	return fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			    buf) : 0;
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)

static struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)

	if (num_cache_leaves == 0)
		return -ENOENT;

	detect_cache_attributes(cpu);
	if (cpuid4_info[cpu] == NULL)
		return -ENOENT;

	/* Allocate all required memory */
	cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;
	memset(cache_kobject[cpu], 0, sizeof(struct kobject));

	index_kobject[cpu] = kmalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;
	memset(index_kobject[cpu], 0,
	       sizeof(struct _index_kobject) * num_cache_leaves);

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}

	return retval;
static void __cpuexit cache_remove_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < num_cache_leaves; i++) {
		cache_remove_shared_cpu_map(cpu, i);
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	}
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
static struct notifier_block cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
	int i;

	if (num_cache_leaves == 0)
		return 0;
	register_cpu_notifier(&cacheinfo_cpu_notifier);
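	/*
	 * CPUs already online when this runs never get a CPU_ONLINE
	 * notification, so replay the callback for each of them by hand.
	 */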
	for_each_online_cpu(i) {
		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
				       (void *)(long)i);
	}

device_initcall(cache_sysfs_init);