2 * Routines to identify caches on Intel CPU.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
17 #include <asm/processor.h>
/*
 * Tail of struct _cache_table: 'descriptor' is a CPUID leaf-2 descriptor
 * byte; the table below maps each descriptor to a cache level (LVL_*) and
 * a size in KB.  NOTE(review): the struct header, the table's opening
 * brace and its zero terminator are elided in this listing.
 */
28 unsigned char descriptor;
33 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
34 static struct _cache_table cache_table[] __cpuinitdata =
36 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
37 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
38 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
39 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
40 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
41 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
42 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
43 { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
44 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
45 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
46 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
47 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
48 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
49 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
50 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
51 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
52 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
53 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
54 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
55 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
56 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
57 { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
58 { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
59 { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
60 { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
61 { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
62 { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
63 { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
64 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
65 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
66 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
67 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
68 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
69 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
70 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
71 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
72 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
73 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
74 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
75 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
76 { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
77 { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
78 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
79 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
80 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
81 { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
82 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
83 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
84 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
/*
 * CPUID(4) "deterministic cache parameters" register layouts.  Each union
 * overlays named bitfields on the raw 32-bit register value.
 * NOTE(review): the enum _cache_type header, the 'split'/'full' union
 * members and closing braces are elided in this listing.
 */
94 CACHE_TYPE_UNIFIED = 3
97 union _cpuid4_leaf_eax {
99 enum _cache_type type:5;
100 unsigned int level:3;
101 unsigned int is_self_initializing:1;
102 unsigned int is_fully_associative:1;
103 unsigned int reserved:4;
/* Both counts are encoded as (value - 1) per the CPUID spec */
104 unsigned int num_threads_sharing:12;
105 unsigned int num_cores_on_die:6;
110 union _cpuid4_leaf_ebx {
112 unsigned int coherency_line_size:12;
113 unsigned int physical_line_partition:10;
114 unsigned int ways_of_associativity:10;
119 union _cpuid4_leaf_ecx {
121 unsigned int number_of_sets:32;
/* Decoded view of one cache leaf, plus the CPUs that share this cache */
126 struct _cpuid4_info {
127 union _cpuid4_leaf_eax eax;
128 union _cpuid4_leaf_ebx ebx;
129 union _cpuid4_leaf_ecx ecx;
131 cpumask_t shared_cpu_map;
/* Number of CPUID(4) leaves; discovered once on the boot CPU */
134 unsigned short num_cache_leaves;
136 /* AMD doesn't have CPUID4. Emulate it here to report the same
137 information to the user. This makes some assumptions about the machine:
138 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
140 In theory the TLBs could be reported as fake type (they are in "dummy").
/*
 * Bitfield views of AMD extended leaves 0x80000005 (L1) and 0x80000006
 * (L2/L3).  NOTE(review): the union l1/l2/l3 headers, 'assoc' fields and
 * 'val' members are elided in this listing.
 */
144 unsigned line_size : 8;
145 unsigned lines_per_tag : 8;
147 unsigned size_in_kb : 8;
154 unsigned line_size : 8;
155 unsigned lines_per_tag : 4;
157 unsigned size_in_kb : 16;
164 unsigned line_size : 8;
165 unsigned lines_per_tag : 4;
/* L3 size field counts 512 KB units, not KB */
168 unsigned size_encoded : 14;
/*
 * assocs[]: maps the AMD associativity encoding from 0x80000006 to an
 * actual way count (e.g. encoding 0xa means 32-way).  levels[]/types[]
 * map the emulated CPUID(4) leaf index to a cache level and cache type.
 */
173 static unsigned short assocs[] __cpuinitdata = {
174 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
175 [8] = 16, [0xa] = 32, [0xb] = 48,
180 static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
181 static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
/*
 * amd_cpuid4 - emulate one CPUID(4) leaf on AMD from leaves 0x80000005/6.
 * @leaf: emulated cache-leaf index (see levels[]/types[] above).
 * @eax/@ebx/@ecx: filled in with Intel-style encodings (counts minus one).
 * NOTE(review): the switch dispatching on @leaf and some declarations
 * (dummy, l2, l3, assoc) are elided in this listing.
 */
183 static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
184 union _cpuid4_leaf_ebx *ebx,
185 union _cpuid4_leaf_ecx *ecx)
188 unsigned line_size, lines_per_tag, assoc, size_in_kb;
189 union l1_cache l1i, l1d;
192 union l1_cache *l1 = &l1d;
/* Raw extended-leaf reads: L1 info in 0x80000005, L2/L3 in 0x80000006 */
198 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
199 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
208 line_size = l1->line_size;
209 lines_per_tag = l1->lines_per_tag;
210 size_in_kb = l1->size_in_kb;
216 line_size = l2.line_size;
217 lines_per_tag = l2.lines_per_tag;
218 /* cpu_data has errata corrections for K7 applied */
219 size_in_kb = current_cpu_data.x86_cache_size;
225 line_size = l3.line_size;
226 lines_per_tag = l3.lines_per_tag;
/* size_encoded is in 512 KB units */
227 size_in_kb = l3.size_encoded * 512;
233 eax->split.is_self_initializing = 1;
234 eax->split.type = types[leaf];
235 eax->split.level = levels[leaf];
/* CPUID(4) encodes sharing/core/way/set counts as (value - 1) */
237 eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
239 eax->split.num_threads_sharing = 0;
240 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
244 eax->split.is_fully_associative = 1;
245 ebx->split.coherency_line_size = line_size - 1;
246 ebx->split.ways_of_associativity = assocs[assoc] - 1;
247 ebx->split.physical_line_partition = lines_per_tag - 1;
248 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
249 (ebx->split.ways_of_associativity + 1) - 1;
/*
 * cpuid4_cache_lookup - fill @this_leaf for cache leaf @index.
 * Uses real CPUID(4) on Intel, amd_cpuid4() emulation on AMD.
 * Returns 0 on success, -EIO if the leaf reports CACHE_TYPE_NULL
 * (i.e. no cache at this index).
 * NOTE(review): the 'edx' declaration, the else branch and closing
 * brace are elided in this listing.
 */
252 static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
254 union _cpuid4_leaf_eax eax;
255 union _cpuid4_leaf_ebx ebx;
256 union _cpuid4_leaf_ecx ecx;
259 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
260 amd_cpuid4(index, &eax, &ebx, &ecx);
262 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
263 if (eax.split.type == CACHE_TYPE_NULL)
264 return -EIO; /* better error ? */
266 this_leaf->eax = eax;
267 this_leaf->ebx = ebx;
268 this_leaf->ecx = ecx;
/* size = sets * line_size * partitions * ways (each field is count-1) */
269 this_leaf->size = (ecx.split.number_of_sets + 1) *
270 (ebx.split.coherency_line_size + 1) *
271 (ebx.split.physical_line_partition + 1) *
272 (ebx.split.ways_of_associativity + 1);
/*
 * find_num_cache_leaves - count CPUID(4) leaves on this CPU by iterating
 * until a leaf reports CACHE_TYPE_NULL.  NOTE(review): the loop counter
 * declaration/increment and the return statement are elided here.
 */
276 static int __cpuinit find_num_cache_leaves(void)
278 unsigned int eax, ebx, ecx, edx;
279 union _cpuid4_leaf_eax cache_eax;
284 /* Do cpuid(4) loop to find out num_cache_leaves */
285 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
286 cache_eax.full = eax;
287 } while (cache_eax.split.type != CACHE_TYPE_NULL);
/*
 * init_intel_cacheinfo - detect cache sizes (trace/L1i/L1d/L2/L3) for @c.
 *
 * Prefers the deterministic CPUID(4) leaves when cpuid_level > 3; falls
 * back to (or, on P4/family 15, supplements with) the legacy CPUID(2)
 * descriptor bytes decoded through cache_table[].  Also derives the
 * last-level-cache id (cpu_llc_id) from the sharing topology, and stores
 * the largest cache size in c->x86_cache_size (KB).
 * NOTE(review): several original lines (braces, else branches, case
 * labels) are elided in this listing.
 */
291 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
293 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
294 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
295 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
296 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
298 unsigned int cpu = c->cpu_index;
301 if (c->cpuid_level > 3) {
302 static int is_initialized;
304 if (is_initialized == 0) {
305 /* Init num_cache_leaves from boot CPU */
306 num_cache_leaves = find_num_cache_leaves();
311 * Whenever possible use cpuid(4), deterministic cache
312 * parameters cpuid leaf to find the cache details
314 for (i = 0; i < num_cache_leaves; i++) {
315 struct _cpuid4_info this_leaf;
319 retval = cpuid4_cache_lookup(i, &this_leaf);
321 switch(this_leaf.eax.split.level) {
323 if (this_leaf.eax.split.type ==
325 new_l1d = this_leaf.size/1024;
326 else if (this_leaf.eax.split.type ==
328 new_l1i = this_leaf.size/1024;
331 new_l2 = this_leaf.size/1024;
/* LLC id = APIC id with the thread-sharing bits shifted out */
332 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
333 index_msb = get_count_order(num_threads_sharing);
334 l2_id = c->apicid >> index_msb;
337 new_l3 = this_leaf.size/1024;
338 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
339 index_msb = get_count_order(num_threads_sharing);
340 l3_id = c->apicid >> index_msb;
349 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
352 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
353 /* supports eax=2 call */
356 unsigned char *dp = (unsigned char *)regs;
359 if (num_cache_leaves != 0 && c->x86 == 15)
362 /* Number of times to iterate */
363 n = cpuid_eax(2) & 0xFF;
365 for ( i = 0 ; i < n ; i++ ) {
/* FIX(review): the '&regs' address-of expressions had been mangled
 * into the registered-trademark sign by a stray HTML-entity decode
 * ("&reg" -> (R)); restored to take the registers' addresses. */
366 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
368 /* If bit 31 is set, this is an unknown format */
369 for ( j = 0 ; j < 3 ; j++ ) {
370 if ( regs[j] < 0 ) regs[j] = 0;
373 /* Byte 0 is level count, not a descriptor */
374 for ( j = 1 ; j < 16 ; j++ ) {
375 unsigned char des = dp[j];
378 /* look up this descriptor in the table */
379 while (cache_table[k].descriptor != 0)
381 if (cache_table[k].descriptor == des) {
382 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
384 switch (cache_table[k].cache_type) {
386 l1i += cache_table[k].size;
389 l1d += cache_table[k].size;
392 l2 += cache_table[k].size;
395 l3 += cache_table[k].size;
398 trace += cache_table[k].size;
/* Publish the per-cpu last-level-cache id derived above */
420 per_cpu(cpu_llc_id, cpu) = l2_id;
427 per_cpu(cpu_llc_id, cpu) = l3_id;
432 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
434 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
437 printk(", L1 D cache: %dK\n", l1d);
442 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
445 printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
447 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
452 /* pointer to _cpuid4_info array (for each cache leaf) */
453 static struct _cpuid4_info *cpuid4_info[NR_CPUS];
/* Leaf y of cpu x; valid only after detect_cache_attributes(x) */
454 #define CPUID4_INFO_IDX(x,y) (&((cpuid4_info[x])[y]))
/*
 * cache_shared_cpu_map_setup - record which CPUs share cache leaf @index
 * with @cpu.  CPUs whose APIC ids match after shifting out the
 * thread-sharing bits share the cache; the mask is updated symmetrically
 * on already-initialized siblings.  NOTE(review): the 'i'/'index_msb'
 * declarations, early return and closing braces are elided here.
 */
457 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
459 struct _cpuid4_info *this_leaf, *sibling_leaf;
460 unsigned long num_threads_sharing;
462 struct cpuinfo_x86 *c = &cpu_data(cpu);
464 this_leaf = CPUID4_INFO_IDX(cpu, index);
465 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
/* Not shared: the map contains only this CPU */
467 if (num_threads_sharing == 1)
468 cpu_set(cpu, this_leaf->shared_cpu_map);
470 index_msb = get_count_order(num_threads_sharing);
472 for_each_online_cpu(i) {
473 if (cpu_data(i).apicid >> index_msb ==
474 c->apicid >> index_msb) {
475 cpu_set(i, this_leaf->shared_cpu_map);
/* Mirror the relation into the sibling's map, if it is set up */
476 if (i != cpu && cpuid4_info[i]) {
477 sibling_leaf = CPUID4_INFO_IDX(i, index);
478 cpu_set(cpu, sibling_leaf->shared_cpu_map);
/*
 * cache_remove_shared_cpu_map - drop @cpu from the shared_cpu_map of every
 * sibling sharing cache leaf @index (hotplug teardown counterpart of
 * cache_shared_cpu_map_setup).  NOTE(review): 'sibling' declaration and
 * closing brace are elided here.
 */
484 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
486 struct _cpuid4_info *this_leaf, *sibling_leaf;
489 this_leaf = CPUID4_INFO_IDX(cpu, index);
490 for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
491 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
492 cpu_clear(cpu, sibling_leaf->shared_cpu_map);
/* Non-SMP builds: sharing maps are meaningless, so these are no-ops */
496 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
497 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
/*
 * free_cache_attributes - undo detect_cache_attributes() for @cpu:
 * unlink it from all sibling sharing maps, then free its leaf array.
 */
500 static void free_cache_attributes(unsigned int cpu)
504 for (i = 0; i < num_cache_leaves; i++)
505 cache_remove_shared_cpu_map(cpu, i);
507 kfree(cpuid4_info[cpu]);
508 cpuid4_info[cpu] = NULL;
/*
 * detect_cache_attributes - allocate and populate cpuid4_info[cpu].
 * Temporarily pins the current task to @cpu so the CPUID reads run on
 * that CPU, fills one _cpuid4_info per cache leaf, and wires up the
 * sharing maps.  Returns 0 on success or a negative errno; on failure
 * partially-built state is torn down.  NOTE(review): some declarations,
 * labels and the final return are elided in this listing.
 */
511 static int __cpuinit detect_cache_attributes(unsigned int cpu)
513 struct _cpuid4_info *this_leaf;
518 if (num_cache_leaves == 0)
521 cpuid4_info[cpu] = kzalloc(
522 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
523 if (cpuid4_info[cpu] == NULL)
/* Run the CPUID reads on the target CPU, then restore affinity */
526 oldmask = current->cpus_allowed;
527 retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
531 /* Do cpuid and store the results */
532 for (j = 0; j < num_cache_leaves; j++) {
533 this_leaf = CPUID4_INFO_IDX(cpu, j);
534 retval = cpuid4_cache_lookup(j, this_leaf);
535 if (unlikely(retval < 0)) {
/* Unwind the sharing maps built so far before bailing out */
538 for (i = 0; i < j; i++)
539 cache_remove_shared_cpu_map(cpu, i);
542 cache_shared_cpu_map_setup(cpu, j);
544 set_cpus_allowed(current, oldmask);
548 kfree(cpuid4_info[cpu]);
549 cpuid4_info[cpu] = NULL;
557 #include <linux/kobject.h>
558 #include <linux/sysfs.h>
560 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
562 /* pointer to kobject for cpuX/cache */
563 static struct kobject * cache_kobject[NR_CPUS];
/* One sysfs 'indexY' directory: identifies (cpu, cache leaf) for show() */
565 struct _index_kobject {
568 unsigned short index;
571 /* pointer to array of kobjects for cpuX/cache/indexY */
572 static struct _index_kobject *index_kobject[NR_CPUS];
573 #define INDEX_KOBJECT_PTR(x,y) (&((index_kobject[x])[y]))
/*
 * show_one_plus - generate a sysfs show routine printing a leaf field
 * plus @val (the CPUID fields are stored as count-minus-one, so most
 * users pass val=1 to print the real count).
 */
575 #define show_one_plus(file_name, object, val) \
576 static ssize_t show_##file_name \
577 (struct _cpuid4_info *this_leaf, char *buf) \
579 return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
582 show_one_plus(level, eax.split.level, 0);
583 show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
584 show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
585 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
586 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
/* Cache size in KB (this_leaf->size is bytes) */
588 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
590 return sprintf (buf, "%luK\n", this_leaf->size / 1024);
/* Hex bitmap of CPUs sharing this cache */
593 static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
595 char mask_str[NR_CPUS];
596 cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
597 return sprintf(buf, "%s\n", mask_str);
/* Human-readable cache type for sysfs; NOTE(review): closing braces elided */
600 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
601 switch(this_leaf->eax.split.type) {
602 case CACHE_TYPE_DATA:
603 return sprintf(buf, "Data\n");
605 case CACHE_TYPE_INST:
606 return sprintf(buf, "Instruction\n");
608 case CACHE_TYPE_UNIFIED:
609 return sprintf(buf, "Unified\n");
612 return sprintf(buf, "Unknown\n");
/*
 * Sysfs attribute plumbing: _cache_attr pairs a generic attribute with
 * typed show/store callbacks taking a _cpuid4_info.  NOTE(review): the
 * struct header, 'type'/'size' define_one_ro lines and array terminator
 * are elided in this listing.
 */
618 struct attribute attr;
619 ssize_t (*show)(struct _cpuid4_info *, char *);
620 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
/* Read-only attribute bound to the matching show_<name> routine */
623 #define define_one_ro(_name) \
624 static struct _cache_attr _name = \
625 __ATTR(_name, 0444, show_##_name, NULL)
627 define_one_ro(level);
629 define_one_ro(coherency_line_size);
630 define_one_ro(physical_line_partition);
631 define_one_ro(ways_of_associativity);
632 define_one_ro(number_of_sets);
634 define_one_ro(shared_cpu_map);
636 static struct attribute * default_attrs[] = {
639 &coherency_line_size.attr,
640 &physical_line_partition.attr,
641 &ways_of_associativity.attr,
642 &number_of_sets.attr,
644 &shared_cpu_map.attr,
648 #define to_object(k) container_of(k, struct _index_kobject, kobj)
649 #define to_attr(a) container_of(a, struct _cache_attr, attr)
/*
 * Generic sysfs show/store dispatchers: recover the (cpu, index) pair
 * from the kobject and forward to the typed _cache_attr callback.
 * NOTE(review): return statements and store's body are elided here.
 */
651 static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
653 struct _cache_attr *fattr = to_attr(attr);
654 struct _index_kobject *this_leaf = to_object(kobj);
658 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
664 static ssize_t store(struct kobject * kobj, struct attribute * attr,
665 const char * buf, size_t count)
/* kobject glue: index directories use default_attrs; the per-cpu 'cache'
 * directory has ops only.  NOTE(review): .show/.store members elided. */
670 static struct sysfs_ops sysfs_ops = {
675 static struct kobj_type ktype_cache = {
676 .sysfs_ops = &sysfs_ops,
677 .default_attrs = default_attrs,
680 static struct kobj_type ktype_percpu_entry = {
681 .sysfs_ops = &sysfs_ops,
/* Free all sysfs-related allocations for @cpu (kfree(NULL) is a no-op) */
684 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
686 kfree(cache_kobject[cpu]);
687 kfree(index_kobject[cpu]);
688 cache_kobject[cpu] = NULL;
689 index_kobject[cpu] = NULL;
690 free_cache_attributes(cpu);
/*
 * cpuid4_cache_sysfs_init - detect cache attributes for @cpu and allocate
 * the kobjects backing cpuX/cache and its index directories.  Returns 0
 * or a negative errno; on allocation failure everything is cleaned up via
 * cpuid4_cache_sysfs_exit().  NOTE(review): error-path gotos/labels and
 * the final return are elided in this listing.
 */
693 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
697 if (num_cache_leaves == 0)
700 err = detect_cache_attributes(cpu);
704 /* Allocate all required memory */
705 cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
706 if (unlikely(cache_kobject[cpu] == NULL))
709 index_kobject[cpu] = kzalloc(
710 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
711 if (unlikely(index_kobject[cpu] == NULL))
717 cpuid4_cache_sysfs_exit(cpu);
/* CPUs that currently have the cache sysfs interface registered */
721 static cpumask_t cache_dev_map = CPU_MASK_NONE;
723 /* Add/Remove cache interface for CPU device */
/*
 * cache_add_dev - register cpuX/cache and one indexY kobject per cache
 * leaf under @sys_dev.  On registration failure, already-registered
 * index kobjects are unregistered in reverse before cleanup.
 * NOTE(review): declarations (i, j, retval), some returns and closing
 * braces are elided in this listing.
 */
724 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
726 unsigned int cpu = sys_dev->id;
728 struct _index_kobject *this_object;
731 retval = cpuid4_cache_sysfs_init(cpu);
732 if (unlikely(retval < 0))
735 cache_kobject[cpu]->parent = &sys_dev->kobj;
736 kobject_set_name(cache_kobject[cpu], "%s", "cache");
737 cache_kobject[cpu]->ktype = &ktype_percpu_entry;
738 retval = kobject_register(cache_kobject[cpu]);
740 cpuid4_cache_sysfs_exit(cpu);
744 for (i = 0; i < num_cache_leaves; i++) {
745 this_object = INDEX_KOBJECT_PTR(cpu,i);
746 this_object->cpu = cpu;
747 this_object->index = i;
748 this_object->kobj.parent = cache_kobject[cpu];
749 kobject_set_name(&(this_object->kobj), "index%1lu", i);
750 this_object->kobj.ktype = &ktype_cache;
751 retval = kobject_register(&(this_object->kobj));
752 if (unlikely(retval)) {
/* Roll back kobjects registered so far */
753 for (j = 0; j < i; j++) {
755 &(INDEX_KOBJECT_PTR(cpu,j)->kobj));
757 kobject_unregister(cache_kobject[cpu]);
758 cpuid4_cache_sysfs_exit(cpu);
763 cpu_set(cpu, cache_dev_map);
/*
 * cache_remove_dev - tear down the sysfs cache interface for the CPU
 * behind @sys_dev.  Bails out if the CPU was never set up or never
 * registered.  NOTE(review): 'i' declaration, returns and closing brace
 * are elided in this listing.
 */
768 static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
770 unsigned int cpu = sys_dev->id;
773 if (cpuid4_info[cpu] == NULL)
775 if (!cpu_isset(cpu, cache_dev_map))
777 cpu_clear(cpu, cache_dev_map);
779 for (i = 0; i < num_cache_leaves; i++)
780 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
781 kobject_unregister(cache_kobject[cpu]);
782 cpuid4_cache_sysfs_exit(cpu);
/*
 * cacheinfo_cpu_callback - CPU hotplug notifier: add the cache interface
 * when a CPU comes online, remove it when a CPU dies.  NOTE(review): the
 * CPU_ONLINE/CPU_DEAD case labels, breaks and return are elided here.
 */
785 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
786 unsigned long action, void *hcpu)
788 unsigned int cpu = (unsigned long)hcpu;
789 struct sys_device *sys_dev;
791 sys_dev = get_cpu_sysdev(cpu);
794 case CPU_ONLINE_FROZEN:
795 cache_add_dev(sys_dev);
798 case CPU_DEAD_FROZEN:
799 cache_remove_dev(sys_dev);
805 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
807 .notifier_call = cacheinfo_cpu_callback,
/*
 * cache_sysfs_init - module init: register the cache interface for every
 * CPU already online, then install the hotplug notifier so later CPUs are
 * handled by cacheinfo_cpu_callback.  NOTE(review): error handling and
 * the return statement are elided in this listing.
 */
810 static int __cpuinit cache_sysfs_init(void)
814 if (num_cache_leaves == 0)
817 for_each_online_cpu(i) {
819 struct sys_device *sys_dev = get_cpu_sysdev(i);
821 err = cache_add_dev(sys_dev);
825 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
829 device_initcall(cache_sysfs_init);