/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>

#include <asm/hypervisor.h>
#include <asm/spitfire.h>

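/* One statistics buffer per cpu; its physical address is handed to the
 * sun4v hypervisor when collection is enabled (see write_mmustat_enable()
 * below).  aligned(64) keeps each cpu's buffer 64-byte aligned.
 */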
static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

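/* Generate a read-only sysdev attribute plus its show routine for one
 * unsigned long field of the per-cpu hv_mmu_statistics buffer.
 */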
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct sys_device *dev, \
			struct sysdev_attribute *attr, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)

SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);

static struct attribute *mmu_stat_attrs[] = {
	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};

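/* All of the counters above appear in a "mmu_stats" subdirectory of each
 * cpu's sysdev node, i.e. /sys/devices/system/cpu/cpuN/mmu_stats/.
 */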
static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};

/* XXX convert to rusty's on_one_cpu */
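/* Run func(arg) on the given cpu by temporarily rebinding the calling
 * task's cpu affinity; the sun4v mmustat hypervisor calls below only
 * act on the cpu that issues them.
 */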
static unsigned long run_on_cpu(unsigned long cpu,
				unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}

static unsigned long read_mmustat_enable(unsigned long junk)
{
	unsigned long ra = 0;

	sun4v_mmustat_info(&ra);

	return ra != 0;
}

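/* Enable collection by handing the hypervisor the physical address of
 * this cpu's statistics buffer, or disable it by passing a real address
 * of zero.
 */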
static unsigned long write_mmustat_enable(unsigned long val)
{
	unsigned long ra, orig_ra;

	if (val)
		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
	else
		ra = 0UL;

	return sun4v_mmustat_conf(ra, &orig_ra);
}

static ssize_t show_mmustat_enable(struct sys_device *s,
				struct sysdev_attribute *attr, char *buf)
{
	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
	return sprintf(buf, "%lx\n", val);
}

static ssize_t store_mmustat_enable(struct sys_device *s,
			struct sysdev_attribute *attr, const char *buf,
			size_t count)
{
	unsigned long val, err;
	int ret = sscanf(buf, "%lu", &val);

	if (ret != 1)
		return -EINVAL;

	err = run_on_cpu(s->id, write_mmustat_enable, val);
	if (err)
		return -EIO;

	return count;
}

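/* Per-cpu control file.  For example, from user space (assuming a sun4v
 * kernel and that cpu0 is online):
 *
 *	echo 1 > /sys/devices/system/cpu/cpu0/mmustat_enable
 *	cat /sys/devices/system/cpu/cpu0/mmu_stats/dmmu_tsb_hits_ctx0_8k_tte
 */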
static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);

static int mmu_stats_supported;

static int register_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return 0;
	sysdev_create_file(s, &attr_mmustat_enable);
	return sysfs_create_group(&s->kobj, &mmu_stat_group);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	sysdev_remove_file(s, &attr_mmustat_enable);
}
#endif

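/* Turn a cpuinfo_sparc field (via cpu_data()) into a read-only show
 * routine; the attributes themselves are declared in the cpu_core_attrs[]
 * table below.
 */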
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
		struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, \
		struct sysdev_attribute *attr, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}

SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);

static struct sysdev_attribute cpu_core_attrs[] = {
	_SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
	_SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	_SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	_SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
	_SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
};

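/* One struct cpu per possible cpu, registered with the generic cpu
 * sysdev layer from topology_init() below.
 */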
static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_create_file(s, &cpu_core_attrs[i]);

	register_mmu_stats(s);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_remove_file(s, &cpu_core_attrs[i]);
}
#endif

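/* Hotplug notifier: create the per-cpu attributes when a cpu comes
 * online and tear them down again when it goes dead.
 */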
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

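/* MMU statistics are a sun4v (hypervisor) service; probe for it once at
 * boot.  sun4v_mmustat_info() returning zero means the service exists.
 */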
static void __init check_mmu_stats(void)
{
	unsigned long dummy1, err;

	if (tlb_type != hypervisor)
		return;

	err = sun4v_mmustat_info(&dummy1);
	if (!err)
		mmu_stats_supported = 1;
}

static void register_nodes(void)
{
#ifdef CONFIG_NUMA
	int i;

	for (i = 0; i < MAX_NUMNODES; i++)
		register_one_node(i);
#endif
}

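/* Wire everything up at boot: register NUMA nodes, hook the hotplug
 * notifier, then register every possible cpu, adding the online-only
 * attributes for cpus that are already up.
 */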
static int __init topology_init(void)
{
	int cpu;

	register_nodes();

	check_mmu_stats();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}

subsys_initcall(topology_init);