2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
29 unsigned char descriptor;
34 /* all the cache descriptor types we care about (no TLB entries) */
35 static const struct _cache_table __cpuinitconst cache_table[] =
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
39 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
40 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
41 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
42 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
43 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
44 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
45 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
46 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
47 { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
48 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
49 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
50 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
52 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
53 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
54 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
55 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
56 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
57 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
58 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
59 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
60 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
61 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
62 { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
63 { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
64 { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
65 { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
66 { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
67 { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
68 { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
69 { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */
70 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
71 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
72 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
73 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
74 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
75 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
76 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
77 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
78 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
79 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
80 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
81 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
82 { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
83 { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
84 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
85 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
86 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
87 { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
88 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
89 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
90 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
91 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
92 { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
93 { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
94 { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
95 { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
96 { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
97 { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
98 { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
99 { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */
100 { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
101 { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
102 { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
112 CACHE_TYPE_UNIFIED = 3
115 union _cpuid4_leaf_eax {
117 enum _cache_type type:5;
118 unsigned int level:3;
119 unsigned int is_self_initializing:1;
120 unsigned int is_fully_associative:1;
121 unsigned int reserved:4;
122 unsigned int num_threads_sharing:12;
123 unsigned int num_cores_on_die:6;
128 union _cpuid4_leaf_ebx {
130 unsigned int coherency_line_size:12;
131 unsigned int physical_line_partition:10;
132 unsigned int ways_of_associativity:10;
137 union _cpuid4_leaf_ecx {
139 unsigned int number_of_sets:32;
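/*
 * Note: CPUID(4) reports ways of associativity, physical line partitions,
 * coherency line size and number of sets as "value minus one", and
 * num_threads_sharing likewise; the code below adds 1 back wherever it
 * computes sizes or sharing masks.
 */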
144 struct _cpuid4_info {
145 union _cpuid4_leaf_eax eax;
146 union _cpuid4_leaf_ebx ebx;
147 union _cpuid4_leaf_ecx ecx;
149 unsigned long can_disable;
150 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
153 /* subset of above _cpuid4_info w/o shared_cpu_map */
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 unsigned long can_disable;
162 #if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
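/*
 * PCI IDs of the AMD northbridge "Miscellaneous Control" function
 * (function 3): 0x1103 for K8, 0x1203 for Family 10h.
 * get_k8_northbridge() below matches against this table to find the
 * northbridge of a given node.
 */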
163 static struct pci_device_id k8_nb_id[] = {
164 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
165 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
170 unsigned short num_cache_leaves;
172 /* AMD doesn't have CPUID4. Emulate it here to report the same
173 information to the user. This makes some assumptions about the machine:
174 L2 not shared, no SMT etc., which is currently true on AMD CPUs.
176 In theory the TLBs could also be reported as a fake cache type (their data is in "dummy").
180 unsigned line_size : 8;
181 unsigned lines_per_tag : 8;
183 unsigned size_in_kb : 8;
190 unsigned line_size : 8;
191 unsigned lines_per_tag : 4;
193 unsigned size_in_kb : 16;
200 unsigned line_size : 8;
201 unsigned lines_per_tag : 4;
204 unsigned size_encoded : 14;
209 static const unsigned short __cpuinitconst assocs[] = {
210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
211 [8] = 16, [0xa] = 32, [0xb] = 48,
216 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
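/*
 * Mapping from the emulated leaf index to cache level/type: leaf 0 is the
 * L1 data cache, leaf 1 the L1 instruction cache, leaf 2 the unified L2
 * and leaf 3 the unified L3 (type values follow enum _cache_type:
 * 1 = data, 2 = instruction, 3 = unified).
 */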
219 static void __cpuinit
220 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
221 union _cpuid4_leaf_ebx *ebx,
222 union _cpuid4_leaf_ecx *ecx)
225 unsigned line_size, lines_per_tag, assoc, size_in_kb;
226 union l1_cache l1i, l1d;
229 union l1_cache *l1 = &l1d;
235 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
236 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
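/*
 * CPUID 0x80000005 returns the L1 data cache descriptor in ECX and the
 * L1 instruction cache descriptor in EDX; 0x80000006 returns L2 in ECX
 * and L3 in EDX, which is how l1d/l1i/l2/l3 are filled above.
 */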
245 line_size = l1->line_size;
246 lines_per_tag = l1->lines_per_tag;
247 size_in_kb = l1->size_in_kb;
253 line_size = l2.line_size;
254 lines_per_tag = l2.lines_per_tag;
255 /* cpu_data has errata corrections for K7 applied */
256 size_in_kb = current_cpu_data.x86_cache_size;
262 line_size = l3.line_size;
263 lines_per_tag = l3.lines_per_tag;
264 size_in_kb = l3.size_encoded * 512;
270 eax->split.is_self_initializing = 1;
271 eax->split.type = types[leaf];
272 eax->split.level = levels[leaf];
274 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
276 eax->split.num_threads_sharing = 0;
277 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
281 eax->split.is_fully_associative = 1;
282 ebx->split.coherency_line_size = line_size - 1;
283 ebx->split.ways_of_associativity = assocs[assoc] - 1;
284 ebx->split.physical_line_partition = lines_per_tag - 1;
285 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
286 (ebx->split.ways_of_associativity + 1) - 1;
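/*
 * Illustrative example: a 512 KB, 16-way L2 with 64-byte lines has
 * 512*1024 / 64 / 16 = 512 sets, stored here as 511 per the CPUID(4)
 * "minus one" convention.
 */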
289 static void __cpuinit
290 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
294 this_leaf->can_disable = 1;
298 __cpuinit cpuid4_cache_lookup_regs(int index,
299 struct _cpuid4_info_regs *this_leaf)
301 union _cpuid4_leaf_eax eax;
302 union _cpuid4_leaf_ebx ebx;
303 union _cpuid4_leaf_ecx ecx;
306 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
307 amd_cpuid4(index, &eax, &ebx, &ecx);
308 if (boot_cpu_data.x86 >= 0x10)
309 amd_check_l3_disable(index, this_leaf);
311 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
314 if (eax.split.type == CACHE_TYPE_NULL)
315 return -EIO; /* better error ? */
317 this_leaf->eax = eax;
318 this_leaf->ebx = ebx;
319 this_leaf->ecx = ecx;
320 this_leaf->size = (ecx.split.number_of_sets + 1) *
321 (ebx.split.coherency_line_size + 1) *
322 (ebx.split.physical_line_partition + 1) *
323 (ebx.split.ways_of_associativity + 1);
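/*
 * Illustrative example: 64 sets * 64-byte lines * 1 partition * 8 ways
 * = 32768 bytes, i.e. a 32 KB cache.
 */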
327 static int __cpuinit find_num_cache_leaves(void)
329 unsigned int eax, ebx, ecx, edx;
330 union _cpuid4_leaf_eax cache_eax;
335 /* Do cpuid(4) loop to find out num_cache_leaves */
336 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
337 cache_eax.full = eax;
338 } while (cache_eax.split.type != CACHE_TYPE_NULL);
342 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
344 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
345 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
346 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
347 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
349 unsigned int cpu = c->cpu_index;
352 if (c->cpuid_level > 3) {
353 static int is_initialized;
355 if (is_initialized == 0) {
356 /* Init num_cache_leaves from boot CPU */
357 num_cache_leaves = find_num_cache_leaves();
362 * Whenever possible use cpuid(4), the deterministic cache
363 * parameters leaf, to find the cache details
365 for (i = 0; i < num_cache_leaves; i++) {
366 struct _cpuid4_info_regs this_leaf;
369 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
371 switch(this_leaf.eax.split.level) {
373 if (this_leaf.eax.split.type ==
375 new_l1d = this_leaf.size/1024;
376 else if (this_leaf.eax.split.type ==
378 new_l1i = this_leaf.size/1024;
381 new_l2 = this_leaf.size/1024;
382 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
383 index_msb = get_count_order(num_threads_sharing);
384 l2_id = c->apicid >> index_msb;
387 new_l3 = this_leaf.size/1024;
388 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
389 index_msb = get_count_order(num_threads_sharing);
390 l3_id = c->apicid >> index_msb;
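/*
 * The cache id is the APIC ID with the thread-sharing bits shifted out,
 * so every CPU sharing this L2/L3 computes the same value; it is used
 * below to set cpu_llc_id.
 */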
399 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 only for the trace cache.
402 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
403 /* supports eax=2 call */
405 unsigned int regs[4];
406 unsigned char *dp = (unsigned char *)regs;
409 if (num_cache_leaves != 0 && c->x86 == 15)
412 /* Number of times to iterate */
413 n = cpuid_eax(2) & 0xFF;
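/*
 * CPUID(2) packs one-byte cache/TLB descriptors into EAX..EDX; the low
 * byte of EAX is the number of times CPUID(2) must be executed to
 * retrieve all descriptors, which is the loop bound used below.
 */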
415 for ( i = 0 ; i < n ; i++ ) {
416 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
418 /* If bit 31 is set, this is an unknown format */
419 for ( j = 0 ; j < 3 ; j++ ) {
420 if (regs[j] & (1 << 31)) regs[j] = 0;
423 /* Byte 0 of EAX is the CPUID(2) repeat count (handled above), not a descriptor */
424 for ( j = 1 ; j < 16 ; j++ ) {
425 unsigned char des = dp[j];
428 /* look up this descriptor in the table */
429 while (cache_table[k].descriptor != 0)
431 if (cache_table[k].descriptor == des) {
432 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
434 switch (cache_table[k].cache_type) {
436 l1i += cache_table[k].size;
439 l1d += cache_table[k].size;
442 l2 += cache_table[k].size;
445 l3 += cache_table[k].size;
448 trace += cache_table[k].size;
470 per_cpu(cpu_llc_id, cpu) = l2_id;
477 per_cpu(cpu_llc_id, cpu) = l3_id;
482 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
484 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
487 printk(", L1 D cache: %dK\n", l1d);
492 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
495 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
497 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
504 /* pointer to _cpuid4_info array (for each cache leaf) */
505 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
506 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
509 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
511 struct _cpuid4_info *this_leaf, *sibling_leaf;
512 unsigned long num_threads_sharing;
514 struct cpuinfo_x86 *c = &cpu_data(cpu);
516 this_leaf = CPUID4_INFO_IDX(cpu, index);
517 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
519 if (num_threads_sharing == 1)
520 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
522 index_msb = get_count_order(num_threads_sharing);
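/*
 * CPUs whose APIC IDs match once the low index_msb bits are shifted out
 * share this cache leaf; record them in this CPU's shared_cpu_map and
 * vice versa below.
 */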
524 for_each_online_cpu(i) {
525 if (cpu_data(i).apicid >> index_msb ==
526 c->apicid >> index_msb) {
528 to_cpumask(this_leaf->shared_cpu_map));
529 if (i != cpu && per_cpu(cpuid4_info, i)) {
531 CPUID4_INFO_IDX(i, index);
532 cpumask_set_cpu(cpu, to_cpumask(
533 sibling_leaf->shared_cpu_map));
539 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
541 struct _cpuid4_info *this_leaf, *sibling_leaf;
544 this_leaf = CPUID4_INFO_IDX(cpu, index);
545 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
546 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
547 cpumask_clear_cpu(cpu,
548 to_cpumask(sibling_leaf->shared_cpu_map));
552 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
553 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
556 static void __cpuinit free_cache_attributes(unsigned int cpu)
560 for (i = 0; i < num_cache_leaves; i++)
561 cache_remove_shared_cpu_map(cpu, i);
563 kfree(per_cpu(cpuid4_info, cpu));
564 per_cpu(cpuid4_info, cpu) = NULL;
568 __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
570 struct _cpuid4_info_regs *leaf_regs =
571 (struct _cpuid4_info_regs *)this_leaf;
573 return cpuid4_cache_lookup_regs(index, leaf_regs);
576 static void __cpuinit get_cpu_leaves(void *_retval)
578 int j, *retval = _retval, cpu = smp_processor_id();
580 /* Do cpuid and store the results */
581 for (j = 0; j < num_cache_leaves; j++) {
582 struct _cpuid4_info *this_leaf;
583 this_leaf = CPUID4_INFO_IDX(cpu, j);
584 *retval = cpuid4_cache_lookup(j, this_leaf);
585 if (unlikely(*retval < 0)) {
588 for (i = 0; i < j; i++)
589 cache_remove_shared_cpu_map(cpu, i);
592 cache_shared_cpu_map_setup(cpu, j);
596 static int __cpuinit detect_cache_attributes(unsigned int cpu)
600 if (num_cache_leaves == 0)
603 per_cpu(cpuid4_info, cpu) = kzalloc(
604 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
605 if (per_cpu(cpuid4_info, cpu) == NULL)
608 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
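/*
 * CPUID reports data for the CPU it executes on, so get_cpu_leaves() is
 * run on the target CPU via the smp_call_function_single() above.
 */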
610 kfree(per_cpu(cpuid4_info, cpu));
611 per_cpu(cpuid4_info, cpu) = NULL;
617 #include <linux/kobject.h>
618 #include <linux/sysfs.h>
620 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
622 /* pointer to kobject for cpuX/cache */
623 static DEFINE_PER_CPU(struct kobject *, cache_kobject);
625 struct _index_kobject {
628 unsigned short index;
631 /* pointer to array of kobjects for cpuX/cache/indexY */
632 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
633 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
635 #define show_one_plus(file_name, object, val) \
636 static ssize_t show_##file_name \
637 (struct _cpuid4_info *this_leaf, char *buf) \
639 return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
642 show_one_plus(level, eax.split.level, 0);
643 show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
644 show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
645 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
646 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
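/*
 * The "+ 1" above undoes the CPUID(4) minus-one encoding for line size,
 * partitions, ways and sets; the level field is exported as-is.
 */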
648 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
650 return sprintf (buf, "%luK\n", this_leaf->size / 1024);
653 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
656 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
660 const struct cpumask *mask;
662 mask = to_cpumask(this_leaf->shared_cpu_map);
664 cpulist_scnprintf(buf, len-2, mask) :
665 cpumask_scnprintf(buf, len-2, mask);
672 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
674 return show_shared_cpu_map_func(leaf, 0, buf);
677 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
679 return show_shared_cpu_map_func(leaf, 1, buf);
682 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
684 switch (this_leaf->eax.split.type) {
685 case CACHE_TYPE_DATA:
686 return sprintf(buf, "Data\n");
687 case CACHE_TYPE_INST:
688 return sprintf(buf, "Instruction\n");
689 case CACHE_TYPE_UNIFIED:
690 return sprintf(buf, "Unified\n");
692 return sprintf(buf, "Unknown\n");
696 #define to_object(k) container_of(k, struct _index_kobject, kobj)
697 #define to_attr(a) container_of(a, struct _cache_attr, attr)
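/*
 * Return the PCI device of the node'th AMD northbridge (function 3,
 * matching k8_nb_id above) by walking the PCI device list; this relies
 * on the northbridges being enumerated in node order.
 */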
700 static struct pci_dev *get_k8_northbridge(int node)
702 struct pci_dev *dev = NULL;
705 for (i = 0; i <= node; i++) {
707 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
710 } while (!pci_match_id(&k8_nb_id[0], dev));
717 static struct pci_dev *get_k8_northbridge(int node)
723 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
725 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
726 int node = cpu_to_node(cpumask_first(mask));
727 struct pci_dev *dev = NULL;
731 if (!this_leaf->can_disable)
732 return sprintf(buf, "Feature not enabled\n");
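/*
 * Northbridge function-3 registers 0x1BC and 0x1C0 hold the two L3
 * cache index-disable slots (Family 10h); decode and print their
 * disable/allocation bits.
 */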
734 dev = get_k8_northbridge(node);
736 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
740 for (i = 0; i < 2; i++) {
743 pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
745 ret += sprintf(buf + ret, "Entry: %d\n", i);
746 ret += sprintf(buf + ret, "Reads: %s\tNew Entries: %s\n",
748 reg & 0x80000000 ? "Disabled" : "Allowed",
749 reg & 0x40000000 ? "Disabled" : "Allowed");
750 ret += sprintf(buf + ret, "SubCache: %x\tIndex: %x\n",
751 (reg & 0x30000) >> 16, reg & 0xfff);
757 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
760 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
761 int node = cpu_to_node(cpumask_first(mask));
762 struct pci_dev *dev = NULL;
763 unsigned int ret, index, val;
765 if (!this_leaf->can_disable)
768 if (strlen(buf) > 15)
771 ret = sscanf(buf, "%x %x", &index, &val);
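/* Input is two hex fields: the disable slot index and the register value. */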
778 dev = get_k8_northbridge(node);
780 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
784 pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
786 pci_write_config_dword(dev, 0x1BC + index * 4, val);
792 struct attribute attr;
793 ssize_t (*show)(struct _cpuid4_info *, char *);
794 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
797 #define define_one_ro(_name) \
798 static struct _cache_attr _name = \
799 __ATTR(_name, 0444, show_##_name, NULL)
801 define_one_ro(level);
803 define_one_ro(coherency_line_size);
804 define_one_ro(physical_line_partition);
805 define_one_ro(ways_of_associativity);
806 define_one_ro(number_of_sets);
808 define_one_ro(shared_cpu_map);
809 define_one_ro(shared_cpu_list);
811 static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
813 static struct attribute * default_attrs[] = {
816 &coherency_line_size.attr,
817 &physical_line_partition.attr,
818 &ways_of_associativity.attr,
819 &number_of_sets.attr,
821 &shared_cpu_map.attr,
822 &shared_cpu_list.attr,
827 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
829 struct _cache_attr *fattr = to_attr(attr);
830 struct _index_kobject *this_leaf = to_object(kobj);
834 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
840 static ssize_t store(struct kobject * kobj, struct attribute * attr,
841 const char * buf, size_t count)
843 struct _cache_attr *fattr = to_attr(attr);
844 struct _index_kobject *this_leaf = to_object(kobj);
848 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
854 static struct sysfs_ops sysfs_ops = {
859 static struct kobj_type ktype_cache = {
860 .sysfs_ops = &sysfs_ops,
861 .default_attrs = default_attrs,
864 static struct kobj_type ktype_percpu_entry = {
865 .sysfs_ops = &sysfs_ops,
868 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
870 kfree(per_cpu(cache_kobject, cpu));
871 kfree(per_cpu(index_kobject, cpu));
872 per_cpu(cache_kobject, cpu) = NULL;
873 per_cpu(index_kobject, cpu) = NULL;
874 free_cache_attributes(cpu);
877 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
881 if (num_cache_leaves == 0)
884 err = detect_cache_attributes(cpu);
888 /* Allocate all required memory */
889 per_cpu(cache_kobject, cpu) =
890 kzalloc(sizeof(struct kobject), GFP_KERNEL);
891 if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
894 per_cpu(index_kobject, cpu) = kzalloc(
895 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
896 if (unlikely(per_cpu(index_kobject, cpu) == NULL))
902 cpuid4_cache_sysfs_exit(cpu);
906 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
908 /* Add/Remove cache interface for CPU device */
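/*
 * This populates /sys/devices/system/cpu/cpuN/cache/indexM/, one indexM
 * directory per cache leaf, each carrying the attributes defined above.
 */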
909 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
911 unsigned int cpu = sys_dev->id;
913 struct _index_kobject *this_object;
916 retval = cpuid4_cache_sysfs_init(cpu);
917 if (unlikely(retval < 0))
920 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
922 &sys_dev->kobj, "%s", "cache");
924 cpuid4_cache_sysfs_exit(cpu);
928 for (i = 0; i < num_cache_leaves; i++) {
929 this_object = INDEX_KOBJECT_PTR(cpu,i);
930 this_object->cpu = cpu;
931 this_object->index = i;
932 retval = kobject_init_and_add(&(this_object->kobj),
934 per_cpu(cache_kobject, cpu),
936 if (unlikely(retval)) {
937 for (j = 0; j < i; j++) {
938 kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
940 kobject_put(per_cpu(cache_kobject, cpu));
941 cpuid4_cache_sysfs_exit(cpu);
944 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
946 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
948 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
952 static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
954 unsigned int cpu = sys_dev->id;
957 if (per_cpu(cpuid4_info, cpu) == NULL)
959 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
961 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
963 for (i = 0; i < num_cache_leaves; i++)
964 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
965 kobject_put(per_cpu(cache_kobject, cpu));
966 cpuid4_cache_sysfs_exit(cpu);
969 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
970 unsigned long action, void *hcpu)
972 unsigned int cpu = (unsigned long)hcpu;
973 struct sys_device *sys_dev;
975 sys_dev = get_cpu_sysdev(cpu);
978 case CPU_ONLINE_FROZEN:
979 cache_add_dev(sys_dev);
982 case CPU_DEAD_FROZEN:
983 cache_remove_dev(sys_dev);
989 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
991 .notifier_call = cacheinfo_cpu_callback,
994 static int __cpuinit cache_sysfs_init(void)
998 if (num_cache_leaves == 0)
1001 for_each_online_cpu(i) {
1003 struct sys_device *sys_dev = get_cpu_sysdev(i);
1005 err = cache_add_dev(sys_dev);
1009 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1013 device_initcall(cache_sysfs_init);