/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2008 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

static int nmi_start(void);
static void nmi_stop(void);
static void nmi_cpu_start(void *dummy);
static void nmi_cpu_stop(void *dummy);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;

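/*
 * CPU hotplug: while profiling is active, start the counters on a CPU as
 * it comes online (or when a takedown fails) and stop them before it goes
 * down.  Note that the start IPI does not wait for completion (last
 * argument 0), while the stop on CPU_DOWN_PREPARE does.
 */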
#ifdef CONFIG_SMP
static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
				 void *data)
{
	int cpu = (unsigned long)data;
	switch (action) {
	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block oprofile_cpu_nb = {
	.notifier_call = oprofile_cpu_notifier
};
#endif

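/*
 * Power management: by the time the suspend callback runs only one CPU is
 * still online, so stopping (and later restarting) the counters on that
 * single CPU is sufficient, and only needed while profiling is enabled.
 */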
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* Only one CPU left, just stop that one */
	if (nmi_enabled == 1)
		nmi_cpu_stop(NULL);
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_cpu_start(NULL);
	return 0;
}

static struct sysdev_class oprofile_sysclass = {
	.name		= "oprofile",
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};

static int __init init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (!error)
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_sysfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

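/*
 * NMI handler, invoked via the die notifier chain.  If the model's
 * check_ctrs() recognizes the NMI as a counter overflow (and records the
 * sample), return NOTIFY_STOP so nobody else treats it as an unknown NMI.
 */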
static int profile_exceptions_notify(struct notifier_block *self,
				     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	int cpu = smp_processor_id();

	switch (val) {
	case DIE_NMI:
		if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

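/*
 * Snapshot the current counter and control MSRs so nmi_restore_registers()
 * can put the hardware back at shutdown.  A zero .addr marks an unused
 * slot and is skipped.
 */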
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			rdmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			rdmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}
}

static void nmi_save_registers(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	nmi_cpu_save_registers(msrs);
}

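/*
 * The per-CPU MSR shadow arrays are sized from the model's counter and
 * control counts and allocated for every possible CPU, so CPUs that are
 * hotplugged in later are covered as well.
 */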
static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(per_cpu(cpu_msrs, i).counters);
		per_cpu(cpu_msrs, i).counters = NULL;
		kfree(per_cpu(cpu_msrs, i).controls);
		per_cpu(cpu_msrs, i).controls = NULL;
	}
}

static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_possible_cpu(i) {
		per_cpu(cpu_msrs, i).counters = kmalloc(counters_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).counters) {
			success = 0;
			break;
		}
		per_cpu(cpu_msrs, i).controls = kmalloc(controls_size,
							GFP_KERNEL);
		if (!per_cpu(cpu_msrs, i).controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}

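/*
 * Program the counters on this CPU and route the local APIC performance
 * counter interrupt as an NMI, saving the previous LVTPC value so
 * nmi_cpu_shutdown() can restore it.
 */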
static void nmi_cpu_setup(void *dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static struct notifier_block profile_exceptions_nb = {
	.notifier_call = profile_exceptions_notify,
	.next = NULL,
	.priority = 0
};

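/*
 * One-time setup when profiling starts: allocate the MSR shadows, hook
 * into the die notifier chain, fill in the MSR addresses on CPU 0 and
 * copy them to every other CPU, then save the hardware state and program
 * the counters everywhere.
 */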
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	err = register_die_notifier(&profile_exceptions_nb);
	if (err) {
		free_msrs();
		return err;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	for_each_possible_cpu(cpu) {
		if (cpu != 0) {
			memcpy(per_cpu(cpu_msrs, cpu).counters,
			       per_cpu(cpu_msrs, 0).counters,
			       sizeof(struct op_msr) * model->num_counters);

			memcpy(per_cpu(cpu_msrs, cpu).controls,
			       per_cpu(cpu_msrs, 0).controls,
			       sizeof(struct op_msr) * model->num_controls);
		}
	}
	on_each_cpu(nmi_save_registers, NULL, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;
	return 0;
}

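/* Mirror of nmi_cpu_save_registers(): write back controls first, then
 * counters. */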
static void nmi_restore_registers(struct op_msrs *msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr *counters = msrs->counters;
	struct op_msr *controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		if (controls[i].addr) {
			wrmsr(controls[i].addr,
			      controls[i].saved.low,
			      controls[i].saved.high);
		}
	}

	for (i = 0; i < nr_ctrs; ++i) {
		if (counters[i].addr) {
			wrmsr(counters[i].addr,
			      counters[i].saved.low,
			      counters[i].saved.high);
		}
	}
}

static void nmi_cpu_shutdown(void *dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs *msrs = &__get_cpu_var(cpu_msrs);

	/* Restoring APIC_LVTPC can trigger an APIC error because the
	 * delivery mode and vector number combination can be illegal. That's
	 * by design: at power-on the APIC LVT entries contain a zero vector
	 * number, which is legal only for NMI delivery mode. So inhibit APIC
	 * errors before restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}

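/*
 * Tear down in roughly the reverse order of nmi_setup(): mark profiling
 * disabled, restore per-CPU hardware state, detach from the die notifier
 * chain, let the model clean up, and free the MSR shadow arrays.
 */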
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	unregister_die_notifier(&profile_exceptions_nb);
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}

static void nmi_cpu_start(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 1);
	return 0;
}

static void nmi_cpu_stop(void *dummy)
{
	struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 1);
}

struct op_counter_config counter_config[OP_MAX_COUNTER];

static int nmi_create_files(struct super_block *sb, struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry *dir;
		char buf[4];

		/* quick little hack to _not_ expose a counter if it is not
		 * available for use. This should protect userspace apps.
		 * NOTE: assumes a 1:1 mapping here (that counters are
		 * organized sequentially in their struct assignment).
		 */
		if (unlikely(!avail_to_resrv_perfctr_nmi_bit(i)))
			continue;

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}

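/*
 * Resulting oprofilefs layout as seen from userspace, one directory per
 * exposed counter (oprofilefs is conventionally mounted on /dev/oprofile):
 *
 *	/dev/oprofile/0/enabled		1 to activate this counter
 *	/dev/oprofile/0/event		model-specific event number
 *	/dev/oprofile/0/count		events between two samples
 *	/dev/oprofile/0/unit_mask	event unit mask
 *	/dev/oprofile/0/kernel		sample kernel-mode execution
 *	/dev/oprofile/0/user		sample user-mode execution
 */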
static int p4force;
module_param(p4force, int, 0);

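/*
 * p4force lets the user override the model check below and force the P4
 * driver onto an otherwise unrecognized Pentium 4 model.
 */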
static int __init p4_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (!p4force && (cpu_model > 6 || cpu_model == 5))
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}

static int __init ppro_init(char **cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	switch (cpu_model) {
	case 0 ... 2:
		*cpu_type = "i386/ppro";
		break;
	case 3 ... 5:
		*cpu_type = "i386/pii";
		break;
	case 6 ... 8:
		*cpu_type = "i386/piii";
		break;
	case 9:
		*cpu_type = "i386/p6_mobile";
		break;
	case 10 ... 13:
		*cpu_type = "i386/p6";
		break;
	case 14:
		*cpu_type = "i386/core";
		break;
	case 15: case 23:
		*cpu_type = "i386/core_2";
		break;
	case 26:
		*cpu_type = "i386/core_2";
		break;
	default:
		/* Unknown */
		return 0;
	}

	model = &op_ppro_spec;
	return 1;
}

/* in order to get sysfs right */
static int using_nmi;

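/*
 * Entry point: select a model-specific driver from the CPU vendor and
 * family.  Returning -ENODEV (no local APIC, unsupported CPU) makes
 * oprofile fall back to its timer-based mode.
 */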
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type;
	int ret = 0;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */

		switch (family) {
		default:
			return -ENODEV;
		case 6:
			model = &op_amd_spec;
			cpu_type = "i386/athlon";
			break;
		case 0x0f:
			model = &op_amd_spec;
			/* Actually it could be i386/hammer too, but give
			   user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		case 0x10:
			model = &op_amd_spec;
			cpu_type = "x86-64/family10";
			break;
		case 0x11:
			model = &op_amd_spec;
			cpu_type = "x86-64/family11h";
			break;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			if (!p4_init(&cpu_type))
				return -ENODEV;
			break;

		/* A P6-class processor */
		case 6:
			if (!ppro_init(&cpu_type))
				return -ENODEV;
			break;

		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}

#ifdef CONFIG_SMP
	register_cpu_notifier(&oprofile_cpu_nb);
#endif
	/* default values, can be overwritten by model */
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;

	if (model->init)
		ret = model->init(ops);
	if (ret)
		return ret;

	init_sysfs();
	using_nmi = 1;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}

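/*
 * Undo op_nmi_init(): only touch sysfs and the hotplug notifier if NMI
 * mode was actually brought up, then give the model a chance to clean up.
 */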
void op_nmi_exit(void)
{
	if (using_nmi) {
		exit_sysfs();
#ifdef CONFIG_SMP
		unregister_cpu_notifier(&oprofile_cpu_nb);
#endif
	}
	if (model->exit)
		model->exit();
}