/*
 *      Routines to identify caches on Intel CPUs.
 *
 *      Changes:
 *      Venkatesh Pallipadi     : Adding cache identification through cpuid(4)
 *      Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *      Andi Kleen / Andreas Herrmann   : CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#define LVL_1_INST      1
#define LVL_1_DATA      2
#define LVL_2           3
#define LVL_3           4
#define LVL_TRACE       5

struct _cache_table {
        unsigned char descriptor;
        char cache_type;
        short size;
};

/* All the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
        { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x0a, LVL_1_DATA, 8 },        /* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },       /* 4-way set assoc, 32 byte line size */
        { 0x22, LVL_3, 512 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, 1024 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, 2048 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, 4096 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },       /* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 },           /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 },           /* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 },           /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 },           /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 },           /* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 },           /* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 },           /* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 },           /* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, 1024 },          /* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, 2048 },          /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, 4096 },          /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, 8192 },          /* 8-way set assoc, 64 byte line size */
        { 0x49, LVL_3, 4096 },          /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, 6144 },          /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, 8192 },          /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, 12288 },         /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, 16384 },         /* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2, 6144 },          /* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },       /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },        /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },       /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 },        /* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 },        /* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 },        /* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 },        /* 8-way set assoc */
        { 0x78, LVL_2, 1024 },          /* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 },           /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, 1024 },          /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, 2048 },          /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },           /* 2-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },           /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },           /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, 1024 },          /* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, 2048 },          /* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 },           /* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, 1024 },          /* 8-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};
enum _cache_type {
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
        struct {
                enum _cache_type        type:5;
                unsigned int            level:3;
                unsigned int            is_self_initializing:1;
                unsigned int            is_fully_associative:1;
                unsigned int            reserved:4;
                unsigned int            num_threads_sharing:12;
                unsigned int            num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int            coherency_line_size:12;
                unsigned int            physical_line_partition:10;
                unsigned int            ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int            number_of_sets:32;
        } split;
        u32 full;
};

struct _cpuid4_info {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
};

unsigned short                  num_cache_leaves;
/*
 * AMD doesn't have CPUID4. Emulate it here to report the same
 * information to the user. This makes some assumptions about the machine:
 * L2 not shared, no SMT, etc., which currently hold on AMD CPUs.
 *
 * In theory the TLBs could be reported as fake type (they are in "dummy").
 * Maybe later.
 */
union l1_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 8;
                unsigned assoc : 8;
                unsigned size_in_kb : 8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned size_in_kb : 16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned res : 2;
                unsigned size_encoded : 14;
        };
        unsigned val;
};
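/*
 * Translation tables for the AMD CPUID4 emulation below: assocs[] maps the
 * associativity code reported by the AMD cache-descriptor leaves (CPUID
 * 0x80000005/0x80000006) to an actual way count (e.g. 0x6 -> 8-way,
 * 0xa -> 32-way), while levels[]/types[] give the CPUID(4)-style cache level
 * and type for each emulated leaf index.
 */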
static unsigned short assocs[] __cpuinitdata = {
        [1] = 1, [2] = 2, [4] = 4, [6] = 8,
        [8] = 16, [0xa] = 32, [0xb] = 48,
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                       union _cpuid4_leaf_ebx *ebx,
                       union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
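        /*
         * The two calls above read the AMD legacy cache-descriptor leaves:
         * CPUID 0x80000005 returns the L1 data cache descriptor in ECX and
         * the L1 instruction cache descriptor in EDX, while 0x80000006
         * returns L2 in ECX and L3 in EDX. The lX_cache unions decode the
         * raw register values.
         */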
        switch (leaf) {
        case 1:
                l1 = &l1i;
                /* fall through */
        case 0:
                if (!l1->val)
                        return;
                assoc = l1->assoc;
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = l2.assoc;
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = current_cpu_data.x86_cache_size;
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = l3.assoc;
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        if (leaf == 3)
                eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
        else
                eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

        if (assoc == 0xf)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assocs[assoc] - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}
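/*
 * cpuid4_cache_lookup() below fills one _cpuid4_info from CPUID leaf 4
 * (or its AMD emulation above). CPUID(4) reports each geometry field minus
 * one, so the total cache size works out to
 *   (sets + 1) * (line size + 1) * (partitions + 1) * (ways + 1).
 */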
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
        union _cpuid4_leaf_ecx  ecx;
        unsigned                edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
                amd_cpuid4(index, &eax, &ebx, &ecx);
        else
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                (ebx.split.coherency_line_size + 1) *
                (ebx.split.physical_line_partition + 1) *
                (ebx.split.ways_of_associativity + 1);
        return 0;
}
static int __cpuinit find_num_cache_leaves(void)
{
        unsigned int            eax, ebx, ecx, edx;
        union _cpuid4_leaf_eax  cache_eax;
        int                     i = -1;

        do {
                ++i;
                /* Do cpuid(4) loop to find out num_cache_leaves */
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}
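/*
 * init_intel_cacheinfo() sizes the caches by one of two methods: the
 * deterministic cache parameters of CPUID(4) when the CPU supports it, or
 * the legacy CPUID(2) descriptor bytes otherwise. P4 (family 15) is
 * special-cased: CPUID(2) is still consulted there, but only for the trace
 * cache, which CPUID(4) does not report.
 */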
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
        unsigned int cpu = c->cpu_index;

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves();
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), the deterministic cache
                 * parameters leaf, to find the cache details.
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info this_leaf;
                        int retval;

                        retval = cpuid4_cache_lookup(i, &this_leaf);
                        if (retval >= 0) {
                                switch (this_leaf.eax.split.level) {
                                case 1:
                                        if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
                                                new_l1d = this_leaf.size/1024;
                                        else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
                                                new_l1i = this_leaf.size/1024;
                                        break;
                                case 2:
                                        new_l2 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l2_id = c->apicid >> index_msb;
                                        break;
                                case 3:
                                        new_l3 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l3_id = c->apicid >> index_msb;
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }
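        /*
         * l2_id/l3_id identify the group of CPUs sharing that cache: CPUID(4)
         * EAX[25:14] gives (threads sharing the cache - 1), and shifting the
         * APIC ID right by log2 of that count yields the same value for every
         * sharer. The ID of the last-level cache is stored in cpu_llc_id
         * further down.
         */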
        /*
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
         * trace cache
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2  call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;
                for (i = 0; i < n; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0; j < 3; j++) {
                                if (regs[j] & (1 << 31)) regs[j] = 0;
                        }
                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1; j < 16; j++) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0) {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }
                                                break;
                                        }
                                        k++;
                                }
                        }
                }
        }
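        /*
         * l1i/l1d/l2/l3/trace now hold the totals found via CPUID(2), in KB
         * (K-uops for the trace cache); the CPUID(4) results gathered above
         * (new_*) override them below.
         */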
        if (new_l2) {
                l2 = new_l2;
                per_cpu(cpu_llc_id, cpu) = l2_id;
        }

        if (new_l3) {
                l3 = new_l3;
                per_cpu(cpu_llc_id, cpu) = l3_id;
        }

        if (trace)
                printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
        else if (l1i)
                printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

        if (l1d)
                printk(", L1 D cache: %dK\n", l1d);
        else
                printk("\n");

        if (l2)
                printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

        if (l3)
                printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        /* This is the value shown as "cache size" in /proc/cpuinfo */
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}
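/*
 * Everything below maintains the per-CPU _cpuid4_info arrays and exposes
 * them through sysfs, one directory per cache leaf under
 * /sys/devices/system/cpu/cpuX/cache/indexY/.
 */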
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)   (&((per_cpu(cpuid4_info, x))[y]))
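/*
 * cache_shared_cpu_map_setup() records, for one cache leaf of one CPU, the
 * mask of all online CPUs that share that cache: those whose APIC IDs match
 * once the low log2(num_threads_sharing) bits are stripped. The siblings'
 * leaves get the new CPU added to their masks as well, so the maps stay
 * symmetric as CPUs come online.
 */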
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpu_set(cpu, this_leaf->shared_cpu_map);
        else {
                index_msb = get_count_order(num_threads_sharing);

                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpu_set(i, this_leaf->shared_cpu_map);
                                if (i != cpu && per_cpu(cpuid4_info, i)) {
                                        sibling_leaf = CPUID4_INFO_IDX(i, index);
                                        cpu_set(cpu, sibling_leaf->shared_cpu_map);
                                }
                        }
                }
        }
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info     *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
        }
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(per_cpu(cpuid4_info, cpu));
        per_cpu(cpuid4_info, cpu) = NULL;
}
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
        struct _cpuid4_info     *this_leaf;
        unsigned long           j;
        int                     retval;
        cpumask_t               oldmask;

        if (num_cache_leaves == 0)
                return -ENOENT;

        per_cpu(cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(cpuid4_info, cpu) == NULL)
                return -ENOMEM;

        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                this_leaf = CPUID4_INFO_IDX(cpu, j);
                retval = cpuid4_cache_lookup(j, this_leaf);
                if (unlikely(retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
        set_cpus_allowed_ptr(current, &oldmask);

out:
        if (retval) {
                kfree(per_cpu(cpuid4_info, cpu));
                per_cpu(cpuid4_info, cpu) = NULL;
        }

        return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)         (&((per_cpu(index_kobject, x))[y]))
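/*
 * Each indexY directory is backed by a _index_kobject, which remembers the
 * (cpu, index) pair so that the generic show() routine below can fetch the
 * matching _cpuid4_info entry when an attribute is read.
 */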
#define show_one_plus(file_name, object, val)                           \
static ssize_t show_##file_name                                         \
                        (struct _cpuid4_info *this_leaf, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
        return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
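/*
 * The "+1" passed to the show_one_plus() users above undoes CPUID(4)'s
 * minus-one encoding, so sysfs reports the real line size, partition count,
 * associativity and set count; level is reported as-is.
 */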
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                                        int type, char *buf)
{
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;

        if (len > 1) {
                cpumask_t *mask = &this_leaf->shared_cpu_map;

                n = type ?
                        cpulist_scnprintf(buf, len-2, *mask) :
                        cpumask_scnprintf(buf, len-2, *mask);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
        return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
        return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
        switch (this_leaf->eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return sprintf(buf, "Unknown\n");
        }
}
struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
        NULL
};
#define to_object(k)    container_of(k, struct _index_kobject, kobj)
#define to_attr(a)      container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf) :
                0;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        return 0;
}

static struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(per_cpu(cache_kobject, cpu));
        kfree(per_cpu(index_kobject, cpu));
        per_cpu(cache_kobject, cpu) = NULL;
        per_cpu(index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        per_cpu(cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
                goto err_out;

        per_cpu(index_kobject, cpu) = kzalloc(
                sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}
static cpumask_t cache_dev_map = CPU_MASK_NONE;
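/*
 * cache_dev_map tracks which CPUs currently have their cache directories
 * registered in sysfs, so that hotplug removal only tears down entries that
 * cache_add_dev() actually created.
 */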
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        kobject_put(per_cpu(cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        cpu_set(cpu, cache_dev_map);

        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
        if (!cpu_isset(cpu, cache_dev_map))
                return;
        cpu_clear(cpu, cache_dev_map);

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
        .notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
        int i, err;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                struct sys_device *sys_dev = get_cpu_sysdev(i);

                err = cache_add_dev(sys_dev);
                if (err)
                        return err;
        }
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);