2 * Routines to identify caches on Intel CPUs.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
29 unsigned char descriptor;
34 /* all the cache descriptor types we care about (no TLB entries) */
35 static const struct _cache_table __cpuinitconst cache_table[] =
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
39 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
40 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
41 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
42 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
43 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
44 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
45 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
46 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
47 { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
48 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
49 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
50 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
51 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
52 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
53 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
54 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
55 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
56 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
57 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
58 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
59 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
60 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
61 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
62 { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
63 { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
64 { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
65 { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
66 { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
67 { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
68 { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
69 { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */
70 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
71 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
72 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
73 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
74 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
75 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
76 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
77 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
78 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
79 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
80 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
81 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
82 { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
83 { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
84 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
85 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
86 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
87 { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
88 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
89 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
90 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
91 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
92 { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
93 { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
94 { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
95 { 0xd7, LVL_3, 2048 }, /* 8-way set assoc, 64 byte line size */
96 { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
97 { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
98 { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
99 { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */
100 { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
101 { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
102 { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
112 CACHE_TYPE_UNIFIED = 3
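/*
 * CPUID leaf 4 (deterministic cache parameters): EAX describes the cache
 * type, level and how many threads/cores share it; EBX packs the line
 * size, physical line partitions and ways of associativity; ECX holds the
 * number of sets.  The unions below overlay bitfields on the raw values.
 */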
115 union _cpuid4_leaf_eax {
117 enum _cache_type type:5;
118 unsigned int level:3;
119 unsigned int is_self_initializing:1;
120 unsigned int is_fully_associative:1;
121 unsigned int reserved:4;
122 unsigned int num_threads_sharing:12;
123 unsigned int num_cores_on_die:6;
128 union _cpuid4_leaf_ebx {
130 unsigned int coherency_line_size:12;
131 unsigned int physical_line_partition:10;
132 unsigned int ways_of_associativity:10;
137 union _cpuid4_leaf_ecx {
139 unsigned int number_of_sets:32;
144 struct _cpuid4_info {
145 union _cpuid4_leaf_eax eax;
146 union _cpuid4_leaf_ebx ebx;
147 union _cpuid4_leaf_ecx ecx;
149 unsigned long can_disable;
150 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
153 /* subset of above _cpuid4_info w/o shared_cpu_map */
154 struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
159 unsigned long can_disable;
163 static struct pci_device_id k8_nb_id[] = {
164 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
165 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
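/*
 * The IDs above select the miscellaneous-control PCI function of the
 * AMD K8 (0x1103) and family 10h (0x1203) northbridges, which hosts the
 * L3 cache index-disable registers used further below.
 */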
170 unsigned short num_cache_leaves;
172 /* AMD doesn't have CPUID4. Emulate it here to report the same
173    information to the user. This makes some assumptions about the machine:
174    L2 not shared, no SMT, etc.; these currently hold on AMD CPUs.
176    In theory the TLBs could be reported as a fake cache type (they are returned in "dummy").
180 unsigned line_size : 8;
181 unsigned lines_per_tag : 8;
183 unsigned size_in_kb : 8;
190 unsigned line_size : 8;
191 unsigned lines_per_tag : 4;
193 unsigned size_in_kb : 16;
200 unsigned line_size : 8;
201 unsigned lines_per_tag : 4;
204 unsigned size_encoded : 14;
209 static const unsigned short __cpuinitconst assocs[] = {
210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
211 [8] = 16, [0xa] = 32, [0xb] = 48,
216 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
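/*
 * Emulated leaf numbering used by amd_cpuid4(): leaf 0 = L1 data,
 * 1 = L1 instruction, 2 = L2, 3 = L3.  The tables above map each leaf to
 * its CPUID4 level and type (1 = data, 2 = instruction, 3 = unified).
 */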
219 static void __cpuinit
220 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
221 union _cpuid4_leaf_ebx *ebx,
222 union _cpuid4_leaf_ecx *ecx)
225 unsigned line_size, lines_per_tag, assoc, size_in_kb;
226 union l1_cache l1i, l1d;
229 union l1_cache *l1 = &l1d;
235 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
236 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
245 line_size = l1->line_size;
246 lines_per_tag = l1->lines_per_tag;
247 size_in_kb = l1->size_in_kb;
253 line_size = l2.line_size;
254 lines_per_tag = l2.lines_per_tag;
255 /* cpu_data has errata corrections for K7 applied */
256 size_in_kb = current_cpu_data.x86_cache_size;
262 line_size = l3.line_size;
263 lines_per_tag = l3.lines_per_tag;
264 size_in_kb = l3.size_encoded * 512;
270 eax->split.is_self_initializing = 1;
271 eax->split.type = types[leaf];
272 eax->split.level = levels[leaf];
274 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
276 eax->split.num_threads_sharing = 0;
277 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
281 eax->split.is_fully_associative = 1;
282 ebx->split.coherency_line_size = line_size - 1;
283 ebx->split.ways_of_associativity = assocs[assoc] - 1;
284 ebx->split.physical_line_partition = lines_per_tag - 1;
285 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
286 (ebx->split.ways_of_associativity + 1) - 1;
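/*
 * Example: a 512 KB, 16-way cache with 64-byte lines has
 * 512*1024 / 64 / 16 = 512 sets, encoded here as 511
 * (all CPUID4 fields are reported as value - 1).
 */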
289 static void __cpuinit
290 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
294 this_leaf->can_disable = 1;
298 __cpuinit cpuid4_cache_lookup_regs(int index,
299 struct _cpuid4_info_regs *this_leaf)
301 union _cpuid4_leaf_eax eax;
302 union _cpuid4_leaf_ebx ebx;
303 union _cpuid4_leaf_ecx ecx;
306 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
307 amd_cpuid4(index, &eax, &ebx, &ecx);
308 if (boot_cpu_data.x86 >= 0x10)
309 amd_check_l3_disable(index, this_leaf);
311 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
314 if (eax.split.type == CACHE_TYPE_NULL)
315 return -EIO; /* better error ? */
317 this_leaf->eax = eax;
318 this_leaf->ebx = ebx;
319 this_leaf->ecx = ecx;
320 this_leaf->size = (ecx.split.number_of_sets + 1) *
321 (ebx.split.coherency_line_size + 1) *
322 (ebx.split.physical_line_partition + 1) *
323 (ebx.split.ways_of_associativity + 1);
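/*
 * The total size in bytes is recovered from the four "value - 1" encoded
 * CPUID4 fields, e.g. 64 sets * 64-byte lines * 1 partition * 8 ways = 32 KB.
 */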
328 __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
330 struct _cpuid4_info_regs *leaf_regs =
331 (struct _cpuid4_info_regs *)this_leaf;
333 return cpuid4_cache_lookup_regs(index, leaf_regs);
336 static int __cpuinit find_num_cache_leaves(void)
338 unsigned int eax, ebx, ecx, edx;
339 union _cpuid4_leaf_eax cache_eax;
344 /* Do cpuid(4) loop to find out num_cache_leaves */
345 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
346 cache_eax.full = eax;
347 } while (cache_eax.split.type != CACHE_TYPE_NULL);
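/* The loop stops at the first leaf reporting CACHE_TYPE_NULL; the index
   reached at that point is the number of valid cache leaves. */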
351 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
353 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
354 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
355 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
356 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
358 unsigned int cpu = c->cpu_index;
361 if (c->cpuid_level > 3) {
362 static int is_initialized;
364 if (is_initialized == 0) {
365 /* Init num_cache_leaves from boot CPU */
366 num_cache_leaves = find_num_cache_leaves();
371 * Whenever possible use cpuid(4), the deterministic cache
372 * parameters leaf, to find the cache details
374 for (i = 0; i < num_cache_leaves; i++) {
375 struct _cpuid4_info_regs this_leaf;
378 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
380 switch(this_leaf.eax.split.level) {
382 if (this_leaf.eax.split.type ==
384 new_l1d = this_leaf.size/1024;
385 else if (this_leaf.eax.split.type ==
387 new_l1i = this_leaf.size/1024;
390 new_l2 = this_leaf.size/1024;
391 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
392 index_msb = get_count_order(num_threads_sharing);
393 l2_id = c->apicid >> index_msb;
396 new_l3 = this_leaf.size/1024;
397 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
398 index_msb = get_count_order(num_threads_sharing);
399 l3_id = c->apicid >> index_msb;
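/*
 * l2_id/l3_id identify the physical cache instance: stripping the low
 * APIC ID bits that distinguish the threads sharing the cache leaves an
 * ID common to all of them.  It is used below to set cpu_llc_id.
 */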
408 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 only for the trace cache.
411 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
412 /* supports eax=2 call */
414 unsigned int regs[4];
415 unsigned char *dp = (unsigned char *)regs;
418 if (num_cache_leaves != 0 && c->x86 == 15)
421 /* Number of times to iterate */
422 n = cpuid_eax(2) & 0xFF;
424 for ( i = 0 ; i < n ; i++ ) {
425 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
427 /* If bit 31 is set, this is an unknown format */
428 for ( j = 0 ; j < 3 ; j++ ) {
429 if (regs[j] & (1 << 31)) regs[j] = 0;
432 /* Byte 0 is the iteration count, not a descriptor */
433 for ( j = 1 ; j < 16 ; j++ ) {
434 unsigned char des = dp[j];
437 /* look up this descriptor in the table */
438 while (cache_table[k].descriptor != 0)
440 if (cache_table[k].descriptor == des) {
441 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
443 switch (cache_table[k].cache_type) {
445 l1i += cache_table[k].size;
448 l1d += cache_table[k].size;
451 l2 += cache_table[k].size;
454 l3 += cache_table[k].size;
457 trace += cache_table[k].size;
479 per_cpu(cpu_llc_id, cpu) = l2_id;
486 per_cpu(cpu_llc_id, cpu) = l3_id;
491 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
493 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
496 printk(", L1 D cache: %dK\n", l1d);
501 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
504 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
506 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
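/* Report the largest cache present: L3 if any, else L2, else total L1. */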
511 /* pointer to _cpuid4_info array (for each cache leaf) */
512 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
513 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
516 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
518 struct _cpuid4_info *this_leaf, *sibling_leaf;
519 unsigned long num_threads_sharing;
521 struct cpuinfo_x86 *c = &cpu_data(cpu);
523 this_leaf = CPUID4_INFO_IDX(cpu, index);
524 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
526 if (num_threads_sharing == 1)
527 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
529 index_msb = get_count_order(num_threads_sharing);
531 for_each_online_cpu(i) {
532 if (cpu_data(i).apicid >> index_msb ==
533 c->apicid >> index_msb) {
535 to_cpumask(this_leaf->shared_cpu_map));
536 if (i != cpu && per_cpu(cpuid4_info, i)) {
538 CPUID4_INFO_IDX(i, index);
539 cpumask_set_cpu(cpu, to_cpumask(
540 sibling_leaf->shared_cpu_map));
546 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
548 struct _cpuid4_info *this_leaf, *sibling_leaf;
551 this_leaf = CPUID4_INFO_IDX(cpu, index);
552 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
553 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
554 cpumask_clear_cpu(cpu,
555 to_cpumask(sibling_leaf->shared_cpu_map));
559 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
560 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
563 static void __cpuinit free_cache_attributes(unsigned int cpu)
567 for (i = 0; i < num_cache_leaves; i++)
568 cache_remove_shared_cpu_map(cpu, i);
570 kfree(per_cpu(cpuid4_info, cpu));
571 per_cpu(cpuid4_info, cpu) = NULL;
574 static void __cpuinit get_cpu_leaves(void *_retval)
576 int j, *retval = _retval, cpu = smp_processor_id();
578 /* Do cpuid and store the results */
579 for (j = 0; j < num_cache_leaves; j++) {
580 struct _cpuid4_info *this_leaf;
581 this_leaf = CPUID4_INFO_IDX(cpu, j);
582 *retval = cpuid4_cache_lookup(j, this_leaf);
583 if (unlikely(*retval < 0)) {
586 for (i = 0; i < j; i++)
587 cache_remove_shared_cpu_map(cpu, i);
590 cache_shared_cpu_map_setup(cpu, j);
594 static int __cpuinit detect_cache_attributes(unsigned int cpu)
598 if (num_cache_leaves == 0)
601 per_cpu(cpuid4_info, cpu) = kzalloc(
602 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
603 if (per_cpu(cpuid4_info, cpu) == NULL)
606 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
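/* CPUID must be executed on the CPU whose caches are being queried, so
   get_cpu_leaves() runs via a cross-CPU call and waits for completion. */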
608 kfree(per_cpu(cpuid4_info, cpu));
609 per_cpu(cpuid4_info, cpu) = NULL;
617 #include <linux/kobject.h>
618 #include <linux/sysfs.h>
620 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
622 /* pointer to kobject for cpuX/cache */
623 static DEFINE_PER_CPU(struct kobject *, cache_kobject);
625 struct _index_kobject {
628 unsigned short index;
631 /* pointer to array of kobjects for cpuX/cache/indexY */
632 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
633 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
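/*
 * The resulting sysfs layout is /sys/devices/system/cpu/cpuX/cache/indexY/
 * with one indexY directory per cache leaf, each exposing the attributes
 * defined below (level, type, size, ways_of_associativity, ...).
 */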
635 #define show_one_plus(file_name, object, val) \
636 static ssize_t show_##file_name \
637 (struct _cpuid4_info *this_leaf, char *buf) \
639 return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
642 show_one_plus(level, eax.split.level, 0);
643 show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
644 show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
645 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
646 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
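/* CPUID4 encodes these fields as "value - 1"; the +1 restores the real
   line size, partition, associativity and set counts for sysfs. */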
648 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
650 return sprintf (buf, "%luK\n", this_leaf->size / 1024);
653 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
656 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
660 const struct cpumask *mask;
662 mask = to_cpumask(this_leaf->shared_cpu_map);
664 cpulist_scnprintf(buf, len-2, mask) :
665 cpumask_scnprintf(buf, len-2, mask);
672 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
674 return show_shared_cpu_map_func(leaf, 0, buf);
677 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
679 return show_shared_cpu_map_func(leaf, 1, buf);
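/* shared_cpu_map prints the sharing CPUs as a hex mask, shared_cpu_list
   as a human-readable CPU list (e.g. "0-3"). */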
682 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
684 switch (this_leaf->eax.split.type) {
685 case CACHE_TYPE_DATA:
686 return sprintf(buf, "Data\n");
687 case CACHE_TYPE_INST:
688 return sprintf(buf, "Instruction\n");
689 case CACHE_TYPE_UNIFIED:
690 return sprintf(buf, "Unified\n");
692 return sprintf(buf, "Unknown\n");
696 #define to_object(k) container_of(k, struct _index_kobject, kobj)
697 #define to_attr(a) container_of(a, struct _cache_attr, attr)
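/*
 * Walk the PCI bus and return the (node + 1)-th device matching k8_nb_id;
 * AMD northbridges are enumerated in node order, so this yields the
 * northbridge that serves the given node.
 */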
700 static struct pci_dev *get_k8_northbridge(int node)
702 struct pci_dev *dev = NULL;
705 for (i = 0; i <= node; i++) {
707 dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
710 } while (!pci_match_id(&k8_nb_id[0], dev));
717 static struct pci_dev *get_k8_northbridge(int node)
723 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
725 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
726 int node = cpu_to_node(cpumask_first(mask));
727 struct pci_dev *dev = NULL;
731 if (!this_leaf->can_disable)
732 return sprintf(buf, "Feature not enabled\n");
734 dev = get_k8_northbridge(node);
736 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
740 for (i = 0; i < 2; i++) {
743 pci_read_config_dword(dev, 0x1BC + i * 4, &reg);
745 ret += sprintf(buf, "%sEntry: %d\n", buf, i);
746 ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
748 reg & 0x80000000 ? "Disabled" : "Allowed",
749 reg & 0x40000000 ? "Disabled" : "Allowed");
750 ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
751 buf, (reg & 0x30000) >> 16, reg & 0xfff);
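/*
 * Each of the two L3 index-disable registers (config offsets 0x1BC and
 * 0x1C0) is decoded above: bit 31 disables reads, bit 30 disables new
 * entries, bits 17:16 select the subcache and bits 11:0 the index.
 */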
757 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
760 const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
761 int node = cpu_to_node(cpumask_first(mask));
762 struct pci_dev *dev = NULL;
763 unsigned int ret, index, val;
765 if (!this_leaf->can_disable)
768 if (strlen(buf) > 15)
771 ret = sscanf(buf, "%x %x", &index, &val);
778 dev = get_k8_northbridge(node);
780 printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
784 pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
786 pci_write_config_dword(dev, 0x1BC + index * 4, val);
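/* The register is written in two steps: first with bit 30 (new-entry
   disable) cleared, then with the full requested value. */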
792 struct attribute attr;
793 ssize_t (*show)(struct _cpuid4_info *, char *);
794 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
797 #define define_one_ro(_name) \
798 static struct _cache_attr _name = \
799 __ATTR(_name, 0444, show_##_name, NULL)
801 define_one_ro(level);
803 define_one_ro(coherency_line_size);
804 define_one_ro(physical_line_partition);
805 define_one_ro(ways_of_associativity);
806 define_one_ro(number_of_sets);
808 define_one_ro(shared_cpu_map);
809 define_one_ro(shared_cpu_list);
811 static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
813 static struct attribute * default_attrs[] = {
816 &coherency_line_size.attr,
817 &physical_line_partition.attr,
818 &ways_of_associativity.attr,
819 &number_of_sets.attr,
821 &shared_cpu_map.attr,
822 &shared_cpu_list.attr,
827 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
829 struct _cache_attr *fattr = to_attr(attr);
830 struct _index_kobject *this_leaf = to_object(kobj);
834 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
840 static ssize_t store(struct kobject * kobj, struct attribute * attr,
841 const char * buf, size_t count)
843 struct _cache_attr *fattr = to_attr(attr);
844 struct _index_kobject *this_leaf = to_object(kobj);
848 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
854 static struct sysfs_ops sysfs_ops = {
859 static struct kobj_type ktype_cache = {
860 .sysfs_ops = &sysfs_ops,
861 .default_attrs = default_attrs,
864 static struct kobj_type ktype_percpu_entry = {
865 .sysfs_ops = &sysfs_ops,
868 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
870 kfree(per_cpu(cache_kobject, cpu));
871 kfree(per_cpu(index_kobject, cpu));
872 per_cpu(cache_kobject, cpu) = NULL;
873 per_cpu(index_kobject, cpu) = NULL;
874 free_cache_attributes(cpu);
877 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
881 if (num_cache_leaves == 0)
884 err = detect_cache_attributes(cpu);
888 /* Allocate all required memory */
889 per_cpu(cache_kobject, cpu) =
890 kzalloc(sizeof(struct kobject), GFP_KERNEL);
891 if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
894 per_cpu(index_kobject, cpu) = kzalloc(
895 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
896 if (unlikely(per_cpu(index_kobject, cpu) == NULL))
902 cpuid4_cache_sysfs_exit(cpu);
906 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
908 /* Add/Remove cache interface for CPU device */
909 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
911 unsigned int cpu = sys_dev->id;
913 struct _index_kobject *this_object;
916 retval = cpuid4_cache_sysfs_init(cpu);
917 if (unlikely(retval < 0))
920 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
922 &sys_dev->kobj, "%s", "cache");
924 cpuid4_cache_sysfs_exit(cpu);
928 for (i = 0; i < num_cache_leaves; i++) {
929 this_object = INDEX_KOBJECT_PTR(cpu,i);
930 this_object->cpu = cpu;
931 this_object->index = i;
932 retval = kobject_init_and_add(&(this_object->kobj),
934 per_cpu(cache_kobject, cpu),
936 if (unlikely(retval)) {
937 for (j = 0; j < i; j++) {
938 kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
940 kobject_put(per_cpu(cache_kobject, cpu));
941 cpuid4_cache_sysfs_exit(cpu);
944 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
946 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
948 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
952 static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
954 unsigned int cpu = sys_dev->id;
957 if (per_cpu(cpuid4_info, cpu) == NULL)
959 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
961 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
963 for (i = 0; i < num_cache_leaves; i++)
964 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
965 kobject_put(per_cpu(cache_kobject, cpu));
966 cpuid4_cache_sysfs_exit(cpu);
969 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
970 unsigned long action, void *hcpu)
972 unsigned int cpu = (unsigned long)hcpu;
973 struct sys_device *sys_dev;
975 sys_dev = get_cpu_sysdev(cpu);
978 case CPU_ONLINE_FROZEN:
979 cache_add_dev(sys_dev);
982 case CPU_DEAD_FROZEN:
983 cache_remove_dev(sys_dev);
989 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
991 .notifier_call = cacheinfo_cpu_callback,
994 static int __cpuinit cache_sysfs_init(void)
998 if (num_cache_leaves == 0)
1001 for_each_online_cpu(i) {
1003 struct sys_device *sys_dev = get_cpu_sysdev(i);
1005 err = cache_add_dev(sys_dev);
1009 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1013 device_initcall(cache_sysfs_init);