 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
	unsigned char descriptor;

/* All the cache descriptor types we care about (no TLB or trace cache
   entries); sizes are in KB, trace cache sizes in K uops. */
static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	CACHE_TYPE_UNIFIED = 3
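
/*
 * CPUID(4) register layout.  Most of the fields below use the
 * architectural "minus one" encoding: threads sharing, cores on die,
 * line size, partitions, associativity and number of sets are all
 * stored as (value - 1), which is why every user of these structures
 * adds 1 back before doing arithmetic with them.
 */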
union _cpuid4_leaf_eax {
	enum _cache_type	type:5;
	unsigned int		level:3;
	unsigned int		is_self_initializing:1;
	unsigned int		is_fully_associative:1;
	unsigned int		reserved:4;
	unsigned int		num_threads_sharing:12;
	unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
	unsigned int		coherency_line_size:12;
	unsigned int		physical_line_partition:10;
	unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
	unsigned int		number_of_sets:32;

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long can_disable;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */

static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc.; that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
*/
	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;
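
/*
 * Lookup tables for the emulation: assocs[] maps the associativity
 * fields reported by CPUID 0x80000005/0x80000006 to an actual number of
 * ways, while levels[] and types[] give the cache level and CPUID4
 * cache type for emulated leaves 0 (L1 data), 1 (L1 instruction),
 * 2 (L2) and 3 (L3).
 */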
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
{
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;
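
	/*
	 * CPUID 0x80000005 reports the L1 data/instruction cache geometry
	 * (ECX/EDX), CPUID 0x80000006 the L2 and L3 geometry.
	 */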
	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
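		/* size_encoded is reported in 512 KB units; convert to KB. */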
		size_in_kb = l3.size_encoded * 512;
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
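
	/*
	 * Work the CPUID4 size relation backwards to get the set count:
	 * sets = size / (line size * ways), stored in the usual minus-one
	 * form.
	 */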
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
{
	this_leaf->can_disable = 1;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
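
	/*
	 * Total cache size = sets * line size * partitions * ways, with
	 * every CPUID4 field in minus-one form.  For example 511 sets,
	 * 63-byte lines, 0 partitions and 7 ways describe
	 * 512 * 64 * 1 * 8 = 256 KB of cache.
	 */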
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				}
			}
		}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache.
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;
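
		/*
		 * cpuid(2) packs one-byte cache/TLB descriptors into
		 * EAX..EDX, which is why regs[] is also viewed as a plain
		 * byte array through dp.
		 */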
		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if (regs[j] & (1 << 31)) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
	if (new_l2) {
		l2 = new_l2;
		per_cpu(cpu_llc_id, cpu) = l2_id;
	}
	if (new_l3) {
		l3 = new_l3;
		per_cpu(cpu_llc_id, cpu) = l3_id;
	}

	printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
	printk(", L1 D cache: %dK\n", l1d);
	printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
	printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);
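
		/*
		 * CPUs whose APIC IDs agree in everything but the low
		 * index_msb bits share this cache; collect them (and
		 * ourselves) into shared_cpu_map.
		 */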
		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}
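
/*
 * Runs on the target CPU via smp_call_function_single(): CPUID must be
 * executed on the CPU whose cache leaves are being read.
 */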
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
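
/*
 * show_one_plus() generates the per-field sysfs show() helpers; @val
 * undoes the CPUID4 minus-one encoding (1 for line size, partitions,
 * ways and sets, 0 for the level field, which is reported as-is).
 */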
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
	(struct _cpuid4_info *this_leaf, char *buf)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
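
/*
 * The set of CPUs sharing a cache is exported twice: shared_cpu_map as
 * a hex CPU bitmask and shared_cpu_list as a human-readable CPU list;
 * the type argument of the helper below selects between the two.
 */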
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		cpumask_t *mask = &this_leaf->shared_cpu_map;

		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
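
/*
 * AMD L3 cache index disable (family 0x10 and later): the controls live
 * in the PCI config space of the node's northbridge, which
 * get_k8_northbridge() looks up (the second, empty definition below is
 * the !CONFIG_PCI stub).  The two 32-bit registers at offsets 0x1BC and
 * 0x1C0 each describe one disable entry; the cache_disable sysfs
 * attribute below reads and writes them.
 */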
static struct pci_dev *get_k8_northbridge(int node)
{
	struct pci_dev *dev = NULL;

	for (i = 0; i <= node; i++) {
		do {
			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		} while (!pci_match_id(&k8_nb_id[0], dev));
	}
	return dev;
}

static struct pci_dev *get_k8_northbridge(int node)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	for (i = 0; i < 2; i++) {
		unsigned int reg;

		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
		ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
			buf,
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
			buf, (reg & 0x30000) >> 16, reg & 0xfff);
	}
	return ret;
}
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
		    size_t count)
{
	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)
		return 0;

	if (strlen(buf) > 15)
		return -EINVAL;

	ret = sscanf(buf, "%x %x", &index, &val);

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);

static struct attribute * default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

	return fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) : 0;
}

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

	return fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) : 0;
}

static struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	if (num_cache_leaves == 0)
		return -ENOENT;
	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;
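
/*
 * The directories created below show up as
 * /sys/devices/system/cpu/cpuX/cache/indexY, one indexY directory per
 * cache leaf reported for that CPU.
 */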
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
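
/*
 * CPU hotplug callback: build the cache sysfs hierarchy when a CPU
 * comes online and tear it down when the CPU goes away.
 */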
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
static int __cpuinit cache_sysfs_init(void)
{
	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);
		err = cache_add_dev(sys_dev);
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);