 * Routines to identify caches on Intel CPUs.
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <asm/processor.h>
        unsigned char descriptor;
/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
        { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
        { 0x0a, LVL_1_DATA, 8 }, /* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
        { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
        { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
        { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
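/*
 * Register layouts for the CPUID leaf 4 ("deterministic cache
 * parameters") data used below.
 */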
        CACHE_TYPE_UNIFIED = 3
union _cpuid4_leaf_eax {
        enum _cache_type type:5;
        unsigned int level:3;
        unsigned int is_self_initializing:1;
        unsigned int is_fully_associative:1;
        unsigned int reserved:4;
        unsigned int num_threads_sharing:12;
        unsigned int num_cores_on_die:6;
union _cpuid4_leaf_ebx {
        unsigned int coherency_line_size:12;
        unsigned int physical_line_partition:10;
        unsigned int ways_of_associativity:10;
union _cpuid4_leaf_ecx {
        unsigned int number_of_sets:32;
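/*
 * Note: CPUID leaf 4 encodes line size, physical line partitions, ways
 * of associativity and number of sets as "value minus one"; consumers
 * below add 1 back when computing cache sizes.
 */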
struct _cpuid4_info {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long can_disable;
        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long can_disable;
static struct pci_device_id k8_nb_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.
   In theory the TLBs could be reported as fake type (they are in "dummy").
        unsigned line_size : 8;
        unsigned lines_per_tag : 8;
        unsigned size_in_kb : 8;
        unsigned line_size : 8;
        unsigned lines_per_tag : 4;
        unsigned size_in_kb : 16;
        unsigned line_size : 8;
        unsigned lines_per_tag : 4;
        unsigned size_encoded : 14;
static unsigned short assocs[] __cpuinitdata = {
        [1] = 1, [2] = 2, [4] = 4, [6] = 8,
        [8] = 16, [0xa] = 32, [0xb] = 48,
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
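/*
 * assocs[] converts the associativity fields from CPUID 0x80000005/6
 * into an actual way count (the L2/L3 values are encoded, e.g. 6 means
 * 8-way).  levels[] and types[] give the cache level and type
 * (1 = data, 2 = instruction, 3 = unified) for each emulated leaf:
 * 0 = L1D, 1 = L1I, 2 = L2, 3 = L3.
 */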
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
           union _cpuid4_leaf_ebx *ebx,
           union _cpuid4_leaf_ecx *ecx)
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l1_cache *l1 = &l1d;
        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = current_cpu_data.x86_cache_size;
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
        eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
        eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assocs[assoc] - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
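/*
 * The number_of_sets computation above follows the CPUID(4) convention:
 * sets = size / (line size * ways), stored as "sets - 1".
 */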
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
        this_leaf->can_disable = 1;
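/*
 * Fill in one cache leaf: on AMD via the emulation above (plus the L3
 * index-disable check on family 0x10 and later), otherwise straight
 * from CPUID leaf 4.  The total size is reconstructed from the
 * reported geometry.
 */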
__cpuinit cpuid4_cache_lookup_regs(int index,
                                   struct _cpuid4_info_regs *this_leaf)
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                amd_cpuid4(index, &eax, &ebx, &ecx);
                if (boot_cpu_data.x86 >= 0x10)
                        amd_check_l3_disable(index, this_leaf);
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */
        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                (ebx.split.coherency_line_size + 1) *
                (ebx.split.physical_line_partition + 1) *
                (ebx.split.ways_of_associativity + 1);
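/*
 * Illustrative example of the size computation above: 512 sets *
 * 64-byte lines * 1 partition * 16 ways = 512 KB.
 */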
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
        struct _cpuid4_info_regs *leaf_regs =
                (struct _cpuid4_info_regs *)this_leaf;
        return cpuid4_cache_lookup_regs(index, leaf_regs);
static int __cpuinit find_num_cache_leaves(void)
        unsigned int eax, ebx, ecx, edx;
        union _cpuid4_leaf_eax cache_eax;
                /* Do cpuid(4) loop to find out num_cache_leaves */
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
        unsigned int cpu = c->cpu_index;
        if (c->cpuid_level > 3) {
                static int is_initialized;
                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves();
                 * Whenever possible use cpuid(4), the deterministic cache
                 * parameters leaf, to find the cache details
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info_regs this_leaf;
                        retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        switch (this_leaf.eax.split.level) {
                                if (this_leaf.eax.split.type ==
                                        new_l1d = this_leaf.size/1024;
                                else if (this_leaf.eax.split.type ==
                                        new_l1i = this_leaf.size/1024;
                                new_l2 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l2_id = c->apicid >> index_msb;
                                new_l3 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l3_id = c->apicid >> index_msb;
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2 call */
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                if (num_cache_leaves != 0 && c->x86 == 15)
                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;
                for (i = 0; i < n; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0; j < 3; j++) {
                                if (regs[j] & (1 << 31)) regs[j] = 0;
                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1; j < 16; j++) {
                                unsigned char des = dp[j];
                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0)
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                switch (cache_table[k].cache_type) {
                                                        l1i += cache_table[k].size;
                                                        l1d += cache_table[k].size;
                                                        l2 += cache_table[k].size;
                                                        l3 += cache_table[k].size;
                                                        trace += cache_table[k].size;
                per_cpu(cpu_llc_id, cpu) = l2_id;
                per_cpu(cpu_llc_id, cpu) = l3_id;
                printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
                printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
                printk(", L1 D cache: %dK\n", l1d);
                printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
                printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
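/*
 * cache_shared_cpu_map_setup() records which CPUs share a given cache:
 * CPUs whose APIC IDs agree after shifting out the low
 * get_count_order(num_threads_sharing) bits share it, and each
 * sibling's map is updated symmetrically.
 */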
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
        if (num_threads_sharing == 1)
                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        index_msb = get_count_order(num_threads_sharing);
        for_each_online_cpu(i) {
                if (cpu_data(i).apicid >> index_msb ==
                    c->apicid >> index_msb) {
                                to_cpumask(this_leaf->shared_cpu_map));
                        if (i != cpu && per_cpu(cpuid4_info, i)) {
                                        CPUID4_INFO_IDX(i, index);
                                cpumask_set_cpu(cpu, to_cpumask(
                                        sibling_leaf->shared_cpu_map));
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpumask_clear_cpu(cpu,
                                  to_cpumask(sibling_leaf->shared_cpu_map));
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void __cpuinit free_cache_attributes(unsigned int cpu)
        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);
        kfree(per_cpu(cpuid4_info, cpu));
        per_cpu(cpuid4_info, cpu) = NULL;
static void __cpuinit get_cpu_leaves(void *_retval)
        int j, *retval = _retval, cpu = smp_processor_id();
        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                struct _cpuid4_info *this_leaf;
                this_leaf = CPUID4_INFO_IDX(cpu, j);
                *retval = cpuid4_cache_lookup(j, this_leaf);
                if (unlikely(*retval < 0)) {
                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                cache_shared_cpu_map_setup(cpu, j);
static int __cpuinit detect_cache_attributes(unsigned int cpu)
        if (num_cache_leaves == 0)
        per_cpu(cpuid4_info, cpu) = kzalloc(
                sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(cpuid4_info, cpu) == NULL)
        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
                kfree(per_cpu(cpuid4_info, cpu));
                per_cpu(cpuid4_info, cpu) = NULL;
#include <linux/kobject.h>
#include <linux/sysfs.h>
extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);
struct _index_kobject {
        unsigned short index;
/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
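/*
 * The "+ val" in show_one_plus() below undoes the minus-one encoding
 * used by CPUID leaf 4 (line size, partitions, ways and sets are
 * reported as value - 1); the cache level is reported as-is, hence
 * val = 0 for it.
 */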
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name \
        (struct _cpuid4_info *this_leaf, char *buf) \
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
        return sprintf(buf, "%luK\n", this_leaf->size / 1024);
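/*
 * show_shared_cpu_map_func(): type == 0 prints shared_cpu_map as a hex
 * mask, type != 0 as a CPU list; len is the room left in the sysfs
 * page, with "len-2" reserving space for a trailing newline and NUL.
 */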
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
                const struct cpumask *mask;
                mask = to_cpumask(this_leaf->shared_cpu_map);
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
        return show_shared_cpu_map_func(leaf, 0, buf);
static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
        return show_shared_cpu_map_func(leaf, 1, buf);
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
        switch (this_leaf->eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
                return sprintf(buf, "Unknown\n");
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)
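/*
 * get_k8_northbridge() walks the PCI devices and returns the
 * (node + 1)-th one matching the AMD northbridge IDs in k8_nb_id[],
 * i.e. the northbridge function for that node.
 */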
static struct pci_dev *get_k8_northbridge(int node)
        struct pci_dev *dev = NULL;
        for (i = 0; i <= node; i++) {
                        dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                } while (!pci_match_id(&k8_nb_id[0], dev));
static struct pci_dev *get_k8_northbridge(int node)
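/*
 * L3 cache index disable (family 0x10): the two disable slots live in
 * the northbridge at PCI config offsets 0x1BC and 0x1C0.
 * show_cache_disable() dumps both slots; store_cache_disable() parses
 * "<slot> <value>" in hex (illustratively, writing "0 0" writes zero to
 * slot 0) and writes the value to the corresponding register.
 */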
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
        const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
        int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        if (!this_leaf->can_disable)
                return sprintf(buf, "Feature not enabled\n");
        dev = get_k8_northbridge(node);
                printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
        for (i = 0; i < 2; i++) {
                pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
                ret += sprintf(buf, "%sEntry: %d\n", buf, i);
                ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
                        reg & 0x80000000 ? "Disabled" : "Allowed",
                        reg & 0x40000000 ? "Disabled" : "Allowed");
                ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
                        buf, (reg & 0x30000) >> 16, reg & 0xfff);
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
        const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
        int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        unsigned int ret, index, val;
        if (!this_leaf->can_disable)
        if (strlen(buf) > 15)
        ret = sscanf(buf, "%x %x", &index, &val);
        dev = get_k8_northbridge(node);
                printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
        pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
        pci_write_config_dword(dev, 0x1BC + index * 4, val);
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
#define define_one_ro(_name) \
static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable =
        __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
static struct attribute *default_attrs[] = {
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
static struct sysfs_ops sysfs_ops = {
static struct kobj_type ktype_cache = {
        .sysfs_ops = &sysfs_ops,
        .default_attrs = default_attrs,
static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops = &sysfs_ops,
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
        kfree(per_cpu(cache_kobject, cpu));
        kfree(per_cpu(index_kobject, cpu));
        per_cpu(cache_kobject, cpu) = NULL;
        per_cpu(index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
        if (num_cache_leaves == 0)
        err = detect_cache_attributes(cpu);
        /* Allocate all required memory */
        per_cpu(cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
        per_cpu(index_kobject, cpu) = kzalloc(
                sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
        cpuid4_cache_sysfs_exit(cpu);
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
        unsigned int cpu = sys_dev->id;
        struct _index_kobject *this_object;
        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
                                      &sys_dev->kobj, "%s", "cache");
                cpuid4_cache_sysfs_exit(cpu);
        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              per_cpu(cache_kobject, cpu),
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        kobject_put(per_cpu(cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
        unsigned int cpu = sys_dev->id;
        if (per_cpu(cpuid4_info, cpu) == NULL)
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
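/*
 * CPU hotplug callback: create the sysfs cache hierarchy when a CPU
 * comes online (CPU_ONLINE*) and tear it down again on CPU_DEAD*.
 */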
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;
        sys_dev = get_cpu_sysdev(cpu);
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
        .notifier_call = cacheinfo_cpu_callback,
static int __cpuinit cache_sysfs_init(void)
        if (num_cache_leaves == 0)
        for_each_online_cpu(i) {
                struct sys_device *sys_dev = get_cpu_sysdev(i);
                err = cache_add_dev(sys_dev);
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
device_initcall(cache_sysfs_init);
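/*
 * Resulting sysfs layout (illustrative):
 *
 *   /sys/devices/system/cpu/cpuX/cache/indexY/
 *       level  type  size  coherency_line_size  physical_line_partition
 *       ways_of_associativity  number_of_sets  shared_cpu_map
 *       shared_cpu_list
 *
 * with one indexY directory per cache leaf reported by cpuid(4).
 */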