 * Routines to identify caches on Intel CPU.
 * Venkatesh Pallipadi		: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>	: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
	unsigned char descriptor;
/* All the cache descriptor types we care about (no TLB entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
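	/*
	 * Each entry above maps one byte-sized descriptor from CPUID leaf 2
	 * to a cache level and a size in KB; e.g. descriptor 0x2c denotes a
	 * 32 KB, 8-way, 64-byte-line L1 data cache.
	 */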
	CACHE_TYPE_UNIFIED = 3
union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long can_disable;

unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT, etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;

static const unsigned short __cpuinitconst assocs[] = {
	[0xf] = 0xffff /* fully associative - no way to show this currently */

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
	   union _cpuid4_leaf_ebx *ebx,
	   union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
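	/*
	 * CPUID 0x80000005 reports the L1 caches (ECX = L1D, EDX = L1I) and
	 * CPUID 0x80000006 reports the L2 and L3 geometry (ECX = L2,
	 * EDX = L3); the raw register values are unpacked through the
	 * l1/l2/l3_cache bitfield unions above.
	 */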
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
		eax->split.num_threads_sharing =
			current_cpu_data.x86_max_cores - 1;
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
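	/*
	 * Worked example (hypothetical 512 KB, 16-way, 64-byte-line cache):
	 * the fields above use the "value minus one" encoding of CPUID leaf 4,
	 * so coherency_line_size = 63, ways_of_associativity = 15, and
	 * number_of_sets = 512 * 1024 / 64 / 16 - 1 = 511.
	 */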
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
	if (boot_cpu_data.x86 == 0x11)

	/* see erratum #382 */
	if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))

	this_leaf->can_disable = 1;
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
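	/*
	 * Example of the size formula (hypothetical leaf decoding, after the
	 * "+ 1" adjustment, to 64 sets, 64-byte lines, 1 partition, 8 ways):
	 * 64 * 64 * 1 * 8 = 32768 bytes, i.e. a 32 KB cache.
	 */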
static int __cpuinit find_num_cache_leaves(void)
	unsigned int eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax cache_eax;

		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
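	/*
	 * Leaf 4 sub-leaves are probed until one reads back as
	 * CACHE_TYPE_NULL; the index of that terminating sub-leaf is the
	 * number of cache leaves this CPU reports.
	 */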
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();

		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters CPUID leaf, to find the cache details.
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);

				switch (this_leaf.eax.split.level) {
					if (this_leaf.eax.split.type ==
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
						new_l1i = this_leaf.size/1024;

					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;

					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
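					/*
					 * The cache ID is the APIC ID with the
					 * bits distinguishing threads that share
					 * this cache shifted out: e.g. 8 sharing
					 * threads give index_msb = 3, so all 8
					 * land on the same l2_id/l3_id.
					 */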
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] & (1 << 31)) regs[j] = 0;
			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;
		per_cpu(cpu_llc_id, cpu) = l2_id;

		per_cpu(cpu_llc_id, cpu) = l3_id;

		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);

		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

		printk(", L1 D cache: %dK\n", l1d);

		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i + l1d));
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
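/*
 * CPUID4_INFO_IDX(x, y) points at leaf y inside CPU x's dynamically
 * allocated _cpuid4_info array (one element per cache leaf).
 */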
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));

		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info *this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
static void __cpuinit get_cpu_leaves(void *_retval)
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;

		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);

		cache_shared_cpu_map_setup(cpu, j);
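	/*
	 * Note: this runs on the CPU being probed (via the
	 * smp_call_function_single() call below), since CPUID describes the
	 * caches of the CPU executing it.
	 */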
static int __cpuinit detect_cache_attributes(unsigned int cpu)
	if (num_cache_leaves == 0)

	per_cpu(cpuid4_info, cpu) = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);

		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
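/*
 * For reference: show_one_plus(level, eax.split.level, 0) generates a
 * show_level() helper that prints this_leaf->eax.split.level; the val = 1
 * instantiations undo the "value minus one" encoding of the CPUID fields.
 */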
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;

		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 1, buf);
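/*
 * The type argument selects the output format: 0 prints the hex CPU mask
 * (shared_cpu_map) and 1 prints a human-readable CPU list (shared_cpu_list),
 * matching the two read-only sysfs attributes defined below.
 */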
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
		return sprintf(buf, "Unknown\n");

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "%x\n", reg);
#define SHOW_CACHE_DISABLE(index)					\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
	return show_cache_disable(this_leaf, buf, index);		\

SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count, unsigned int index)
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;
	unsigned int scrubber = 0;

	if (!this_leaf->can_disable)

	if (!capable(CAP_SYS_ADMIN))

	if (strict_strtoul(buf, 10, &val) < 0)

	pci_read_config_dword(dev, 0x58, &scrubber);
	scrubber &= ~0x1f000000;
	pci_write_config_dword(dev, 0x58, scrubber);

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);

	pci_write_config_dword(dev, 0x1BC + index * 4, val);
#define STORE_CACHE_DISABLE(index)					\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
	return store_cache_disable(this_leaf, buf, count, index);	\

STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);

#define define_one_ro(_name)				\
static struct _cache_attr _name =			\
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

static struct attribute *default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable_0.attr,
	&cache_disable_1.attr,
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))

	per_cpu(index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;
	struct _index_kobject *this_object;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &sys_dev->kobj, "%s", "cache");

		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);

		kobject_uevent(&(this_object->kobj), KOBJ_ADD);

	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
	unsigned int cpu = sys_dev->id;

	if (per_cpu(cpuid4_info, cpu) == NULL)

	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))

	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);

	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
	.notifier_call = cacheinfo_cpu_callback,
static int __cpuinit cache_sysfs_init(void)
	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);