/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec const * model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];

static int nmi_start(void);
static void nmi_stop(void);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
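
/* Power-management hooks: profiling must be quiesced across a suspend and
 * re-armed on resume, but only if it was actually running (nmi_enabled). */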
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	if (nmi_enabled == 1)
		nmi_stop();
	return 0;
}

static int nmi_resume(struct sys_device *dev)
{
	if (nmi_enabled == 1)
		nmi_start();
	return 0;
}
static struct sysdev_class oprofile_sysclass = {
	set_kset_name("oprofile"),
	.resume		= nmi_resume,
	.suspend	= nmi_suspend,
};

static struct sys_device device_oprofile = {
	.id	= 0,
	.cls	= &oprofile_sysclass,
};
static int __init init_driverfs(void)
{
	int error;
	if (!(error = sysdev_class_register(&oprofile_sysclass)))
		error = sysdev_register(&device_oprofile);
	return error;
}

static void exit_driverfs(void)
{
	sysdev_unregister(&device_oprofile);
	sysdev_class_unregister(&oprofile_sysclass);
}

#else
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */
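
/* NMI handler proper: every perf-counter NMI lands here and is forwarded to
 * the CPU-model-specific check_ctrs(), which checks for counter overflow and
 * logs the sample. */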
static int nmi_callback(struct pt_regs * regs, int cpu)
{
	return model->check_ctrs(regs, &cpu_msrs[cpu]);
}
static void nmi_cpu_save_registers(struct op_msrs * msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrs; ++i) {
		rdmsr(counters[i].addr,
		      counters[i].saved.low,
		      counters[i].saved.high);
	}

	for (i = 0; i < nr_ctrls; ++i) {
		rdmsr(controls[i].addr,
		      controls[i].saved.low,
		      controls[i].saved.high);
	}
}
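
/* Runs on each CPU: ask the model for its MSR addresses, then snapshot the
 * current counter/control values so they can be restored at shutdown. */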
static void nmi_save_registers(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	model->fill_in_addresses(msrs);
	nmi_cpu_save_registers(msrs);
}
static void free_msrs(void)
{
	int i;
	for_each_possible_cpu(i) {
		kfree(cpu_msrs[i].counters);
		cpu_msrs[i].counters = NULL;
		kfree(cpu_msrs[i].controls);
		cpu_msrs[i].controls = NULL;
	}
}
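
/* Allocate per-CPU MSR shadow arrays; on any failure, release everything
 * already allocated and report failure to the caller. */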
static int allocate_msrs(void)
{
	int success = 1;
	size_t controls_size = sizeof(struct op_msr) * model->num_controls;
	size_t counters_size = sizeof(struct op_msr) * model->num_counters;

	int i;
	for_each_online_cpu(i) {
		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
		if (!cpu_msrs[i].counters) {
			success = 0;
			break;
		}
		cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
		if (!cpu_msrs[i].controls) {
			success = 0;
			break;
		}
	}

	if (!success)
		free_msrs();

	return success;
}
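
/* Per-CPU setup: program the event selection under oprofilefs_lock, then
 * point the local APIC performance-counter LVT entry at NMI delivery. */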
static void nmi_cpu_setup(void * dummy)
{
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];
	spin_lock(&oprofilefs_lock);
	model->setup_ctrs(msrs);
	spin_unlock(&oprofilefs_lock);
	saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}
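
/* Driver-wide setup: allocate MSR state, reserve the performance-counter
 * NMI, save the existing MSR contents, program the counters on every CPU,
 * and only then install the NMI callback. */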
static int nmi_setup(void)
{
	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to be careful to install our NMI handler
	 * without actually triggering any NMIs as this will
	 * break the core code horrifically.
	 */
	if (reserve_lapic_nmi() < 0) {
		free_msrs();
		return -EBUSY;
	}

	/* We need to serialize save and setup for HT because the subsets
	 * of MSRs touched by the save and setup operations are distinct.
	 */
	on_each_cpu(nmi_save_registers, NULL, 0, 1);
	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
	set_nmi_callback(nmi_callback);
	nmi_enabled = 1;
	return 0;
}
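
/* Inverse of nmi_cpu_save_registers(); note the order is swapped here,
 * controls are written back before counters. */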
static void nmi_restore_registers(struct op_msrs * msrs)
{
	unsigned int const nr_ctrs = model->num_counters;
	unsigned int const nr_ctrls = model->num_controls;
	struct op_msr * counters = msrs->counters;
	struct op_msr * controls = msrs->controls;
	unsigned int i;

	for (i = 0; i < nr_ctrls; ++i) {
		wrmsr(controls[i].addr,
		      controls[i].saved.low,
		      controls[i].saved.high);
	}

	for (i = 0; i < nr_ctrs; ++i) {
		wrmsr(counters[i].addr,
		      counters[i].saved.low,
		      counters[i].saved.high);
	}
}
static void nmi_cpu_shutdown(void * dummy)
{
	unsigned int v;
	int cpu = smp_processor_id();
	struct op_msrs * msrs = &cpu_msrs[cpu];

	/* restoring APIC_LVTPC can trigger an apic error because the delivery
	 * mode and vector number combination can be illegal. That's by design:
	 * at power-on the APIC LVT entries contain a zero vector number, which
	 * is legal only for NMI delivery mode. So inhibit apic errors before
	 * restoring the LVTPC.
	 */
	v = apic_read(APIC_LVTERR);
	apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
	apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
	apic_write(APIC_LVTERR, v);
	nmi_restore_registers(msrs);
}
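
/* Tear down in reverse order of nmi_setup(): stop reporting ourselves as
 * enabled, restore per-CPU APIC/MSR state, then drop the NMI callback, the
 * LAPIC NMI reservation, and the MSR shadow memory. */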
static void nmi_shutdown(void)
{
	nmi_enabled = 0;
	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
	unset_nmi_callback();
	release_lapic_nmi();
	free_msrs();
}
static void nmi_cpu_start(void * dummy)
{
	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
	model->start(msrs);
}

static int nmi_start(void)
{
	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
	return 0;
}
static void nmi_cpu_stop(void * dummy)
{
	struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
	model->stop(msrs);
}

static void nmi_stop(void)
{
	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}
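
/* One configuration slot per hardware counter, exposed to user space below
 * as oprofilefs files (one numbered directory per counter). */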
struct op_counter_config counter_config[OP_MAX_COUNTER];

static int nmi_create_files(struct super_block * sb, struct dentry * root)
{
	unsigned int i;

	for (i = 0; i < model->num_counters; ++i) {
		struct dentry * dir;
		char buf[4];

		snprintf(buf, sizeof(buf), "%d", i);
		dir = oprofilefs_mkdir(sb, root, buf);
		oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
	}

	return 0;
}
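
/* Pentium 4 detection. p4force is a module parameter letting the user
 * override the model check below; models 5 and >6 are otherwise rejected.
 * With SMP, the number of hyperthread siblings picks between the plain P4
 * and the HT-aware (2-thread) model; more than 2 threads is unsupported. */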
static int p4force;
module_param(p4force, int, 0);

static int __init p4_init(char ** cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (!p4force && (cpu_model > 6 || cpu_model == 5))
		return 0;

#ifndef CONFIG_SMP
	*cpu_type = "i386/p4";
	model = &op_p4_spec;
	return 1;
#else
	switch (smp_num_siblings) {
	case 1:
		*cpu_type = "i386/p4";
		model = &op_p4_spec;
		return 1;

	case 2:
		*cpu_type = "i386/p4-ht";
		model = &op_p4_ht2_spec;
		return 1;
	}
#endif

	printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
	printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
	return 0;
}
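
/* P6-family (family 6) model selection: map the CPU model number to the
 * user-visible cpu_type string; all variants share op_ppro_spec. */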
static int __init ppro_init(char ** cpu_type)
{
	__u8 cpu_model = boot_cpu_data.x86_model;

	if (cpu_model == 14)
		*cpu_type = "i386/core";
	else if (cpu_model > 0xd)
		return 0;
	else if (cpu_model == 9) {
		*cpu_type = "i386/p6_mobile";
	} else if (cpu_model > 5) {
		*cpu_type = "i386/piii";
	} else if (cpu_model > 2) {
		*cpu_type = "i386/pii";
	} else {
		*cpu_type = "i386/ppro";
	}

	model = &op_ppro_spec;
	return 1;
}
/* in order to get driverfs right */
static int using_nmi;
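
/* Entry point: pick a CPU model implementation based on vendor/family and
 * fill in the oprofile_operations accordingly. Returns -ENODEV when this
 * CPU cannot be profiled via NMIs, letting the caller fall back (e.g. to
 * the timer mode mentioned in p4_init()). */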
int __init op_nmi_init(struct oprofile_operations *ops)
{
	__u8 vendor = boot_cpu_data.x86_vendor;
	__u8 family = boot_cpu_data.x86;
	char *cpu_type;

	if (!cpu_has_apic)
		return -ENODEV;

	switch (vendor) {
	case X86_VENDOR_AMD:
		/* Needs to be at least an Athlon (or hammer in 32bit mode) */
		switch (family) {
		case 6:
			model = &op_athlon_spec;
			cpu_type = "i386/athlon";
			break;
		case 0xf:
			model = &op_athlon_spec;
			/* Actually it could be i386/hammer too, but give
			   user space a consistent name. */
			cpu_type = "x86-64/hammer";
			break;
		default:
			return -ENODEV;
		}
		break;

	case X86_VENDOR_INTEL:
		switch (family) {
		/* Pentium IV */
		case 0xf:
			if (!p4_init(&cpu_type))
				return -ENODEV;
			break;

		/* A P6-class processor */
		case 6:
			if (!ppro_init(&cpu_type))
				return -ENODEV;
			break;

		default:
			return -ENODEV;
		}
		break;

	default:
		return -ENODEV;
	}

	init_driverfs();
	using_nmi = 1;
	ops->create_files = nmi_create_files;
	ops->setup = nmi_setup;
	ops->shutdown = nmi_shutdown;
	ops->start = nmi_start;
	ops->stop = nmi_stop;
	ops->cpu_type = cpu_type;
	printk(KERN_INFO "oprofile: using NMI interrupt.\n");
	return 0;
}
void op_nmi_exit(void)
{
	if (using_nmi)
		exit_driverfs();
}