 * Routines to identify caches on Intel CPU.
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>

	unsigned char descriptor;
/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */

	CACHE_TYPE_UNIFIED = 3
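/*
 * The unions below mirror the register layout of the deterministic cache
 * parameters leaf, CPUID.(EAX=4): the bit-fields pick apart EAX, EBX and
 * ECX, while the 'full' member (used further down via eax.full etc.)
 * holds the raw 32-bit register value.
 */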
union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */

unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;

static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
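/*
 * assocs[] translates the associativity encoding used by AMD's extended
 * CPUID leaves into an actual number of ways (e.g. 0x6 -> 8-way,
 * 0xa -> 32-way).  levels[] and types[] give the cache level and cache
 * type reported for each emulated CPUID4 sub-leaf (0 = L1D, 1 = L1I,
 * 2 = L2, 3 = L3), matching the _cache_type enum above.
 */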
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
				 union _cpuid4_leaf_ebx *ebx,
				 union _cpuid4_leaf_ecx *ecx)
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
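	/*
	 * Register layout of the two extended leaves, per AMD's CPUID
	 * documentation: 0x80000005 returns the L1 data cache in ECX and
	 * the L1 instruction cache in EDX; 0x80000006 returns the L2 cache
	 * in ECX and the L3 cache in EDX.  The L3 "size_encoded" field is
	 * in units of 512 KB, hence the "* 512" conversion below.
	 */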
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
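	/*
	 * All CPUID4 fields are reported minus one, so the size works out
	 * as sets * line size * partitions * ways.  For example
	 * (illustrative values): a 4 MB, 16-way cache with 64-byte lines
	 * reports 4095, 63, 0 and 15 respectively, giving
	 * 4096 * 64 * 1 * 16 = 4 MB.
	 */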
static int __cpuinit find_num_cache_leaves(void)
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			retval = cpuid4_cache_lookup(i, &this_leaf);
				switch (this_leaf.eax.split.level) {
					if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
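					/*
					 * l2_id/l3_id hold the APIC-ID prefix
					 * shared by every thread that shares
					 * this cache (the thread-index bits
					 * are shifted out); they feed
					 * cpu_llc_id further down.
					 */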
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for the trace cache only.
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)
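		/*
		 * On P4 (family 15) the cpuid4 path above already covers the
		 * normal caches, so leaf 2 is consulted only for the trace
		 * cache descriptor (see the only_trace check in the lookup
		 * loop below).
		 */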
		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;
		per_cpu(cpu_llc_id, cpu) = l2_id;

		per_cpu(cpu_llc_id, cpu) = l3_id;

		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
		printk(", L1 D cache: %dK\n", l1d);
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
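/*
 * CPUID4_INFO_IDX(cpu, index) returns a pointer to leaf 'index' in the
 * per-CPU array allocated by detect_cache_attributes() below, e.g.
 * CPUID4_INFO_IDX(0, 2) is the third cache leaf of CPU 0.
 */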
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
	struct _cpuid4_info	*this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
static void __cpuinit free_cache_attributes(unsigned int cpu)
	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

static int __cpuinit detect_cache_attributes(unsigned int cpu)
	struct _cpuid4_info	*this_leaf;

	if (num_cache_leaves == 0)

	per_cpu(cpuid4_info, cpu) = kzalloc(
		sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
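	/*
	 * cpuid4_cache_lookup() issues CPUID locally, so temporarily pin
	 * this task to the CPU being probed; the old affinity mask is
	 * restored once all leaves have been read.
	 */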
	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
		cache_shared_cpu_map_setup(cpu, j);
	set_cpus_allowed_ptr(current, &oldmask);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
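/*
 * The non-zero 'val' arguments undo the minus-one encoding of the CPUID4
 * fields: e.g. a ways_of_associativity value of 15 is shown in sysfs as
 * "16", while level is shown as-is (val == 0).
 */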
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	cpumask_t *mask = &this_leaf->shared_cpu_map;

			cpulist_scnprintf(buf, len-2, *mask):
			cpumask_scnprintf(buf, len-2, *mask);

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
	return show_shared_cpu_map_func(leaf, 1, buf);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	return sprintf(buf, "Unknown\n");
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)
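/*
 * For example, define_one_ro(level) expands to:
 *	static struct _cache_attr level =
 *		__ATTR(level, 0444, show_level, NULL)
 * i.e. a read-only sysfs attribute backed by the show_level() helper
 * generated by show_one_plus() above.
 */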
define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute * default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)

static struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops = &sysfs_ops,
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))

	per_cpu(index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);
static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
	unsigned int cpu = sys_dev->id;
	struct _index_kobject *this_object;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &sys_dev->kobj, "%s", "cache");
		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);

	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
	unsigned int cpu = sys_dev->id;

	if (per_cpu(cpuid4_info, cpu) == NULL)
	if (!cpu_isset(cpu, cache_dev_map))
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
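		/*
		 * Mirror CPU hotplug: create the cache sysfs hierarchy when a
		 * CPU comes online and tear it down when the CPU is removed.
		 */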
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
	.notifier_call = cacheinfo_cpu_callback,

static int __cpuinit cache_sysfs_init(void)
	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);