/**
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"
static struct op_x86_model_spec const * model;
static struct op_msrs cpu_msrs[NR_CPUS];
static unsigned long saved_lvtpc[NR_CPUS];

static int nmi_start(void);
static void nmi_stop(void);

/* 0 == registered but off, 1 == registered and on */
static int nmi_enabled = 0;
#ifdef CONFIG_PM

static int nmi_suspend(struct sys_device *dev, pm_message_t state)
{
        if (nmi_enabled == 1)
                nmi_stop();
        return 0;
}

static int nmi_resume(struct sys_device *dev)
{
        if (nmi_enabled == 1)
                nmi_start();
        return 0;
}

static struct sysdev_class oprofile_sysclass = {
        set_kset_name("oprofile"),
        .resume         = nmi_resume,
        .suspend        = nmi_suspend,
};
static struct sys_device device_oprofile = {
        .id     = 0,
        .cls    = &oprofile_sysclass,
};
static int __init init_driverfs(void)
{
        int error;
        if (!(error = sysdev_class_register(&oprofile_sysclass)))
                error = sysdev_register(&device_oprofile);
        return error;
}
static void exit_driverfs(void)
{
        sysdev_unregister(&device_oprofile);
        sysdev_class_unregister(&oprofile_sysclass);
}
#else
#define init_driverfs() do { } while (0)
#define exit_driverfs() do { } while (0)
#endif /* CONFIG_PM */
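/*
 * Note on the stubs above: the do { } while (0) form gives the empty
 * macro statement semantics, so "init_driverfs();" still parses as a
 * single statement (for example as the body of an if/else) whether or
 * not CONFIG_PM is set.
 */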
static int nmi_callback(struct pt_regs * regs, int cpu)
{
        return model->check_ctrs(regs, &cpu_msrs[cpu]);
}
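/*
 * The return value propagates model->check_ctrs()'s verdict back to
 * the arch NMI dispatch code: nonzero means the NMI came from one of
 * our overflowing counters and has been handled here, while zero lets
 * other NMI users (such as the NMI watchdog) be considered instead.
 */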
static void nmi_cpu_save_registers(struct op_msrs * msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr * counters = msrs->counters;
        struct op_msr * controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrs; ++i) {
                rdmsr(counters[i].addr,
                      counters[i].saved.low,
                      counters[i].saved.high);
        }

        for (i = 0; i < nr_ctrls; ++i) {
                rdmsr(controls[i].addr,
                      controls[i].saved.low,
                      controls[i].saved.high);
        }
}
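/*
 * On i386 the rdmsr() macro wraps the rdmsr instruction, which
 * delivers the 64-bit MSR value in the EDX:EAX register pair; the
 * macro stores the two 32-bit halves into its second and third
 * arguments, which is why op_msr keeps saved.low/saved.high rather
 * than a single 64-bit value.
 */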
static void nmi_save_registers(void * dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs * msrs = &cpu_msrs[cpu];
        model->fill_in_addresses(msrs);
        nmi_cpu_save_registers(msrs);
}
static void free_msrs(void)
{
        int i;
        for (i = 0; i < NR_CPUS; ++i) {
                kfree(cpu_msrs[i].counters);
                cpu_msrs[i].counters = NULL;
                kfree(cpu_msrs[i].controls);
                cpu_msrs[i].controls = NULL;
        }
}
static int allocate_msrs(void)
{
        int success = 1;
        size_t controls_size = sizeof(struct op_msr) * model->num_controls;
        size_t counters_size = sizeof(struct op_msr) * model->num_counters;

        int i;
        for (i = 0; i < NR_CPUS; ++i) {
                if (!cpu_online(i))
                        continue;

                cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
                if (!cpu_msrs[i].counters) {
                        success = 0;
                        break;
                }
                cpu_msrs[i].controls = kmalloc(controls_size, GFP_KERNEL);
                if (!cpu_msrs[i].controls) {
                        success = 0;
                        break;
                }
        }

        if (!success)
                free_msrs();

        return success;
}
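/*
 * Note that allocate_msrs() uses a boolean convention (1 = success,
 * 0 = failure) rather than the usual 0/-errno; nmi_setup() below maps
 * the failure onto -ENOMEM. On a partial failure, free_msrs() walks
 * all NR_CPUS entries, which is safe because the array is static
 * (zero-initialised) and kfree(NULL) is a no-op.
 */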
static void nmi_cpu_setup(void * dummy)
{
        int cpu = smp_processor_id();
        struct op_msrs * msrs = &cpu_msrs[cpu];
        spin_lock(&oprofilefs_lock);
        model->setup_ctrs(msrs);
        spin_unlock(&oprofilefs_lock);
        saved_lvtpc[cpu] = apic_read(APIC_LVTPC);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}
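/*
 * Writing APIC_DM_NMI into the local APIC's performance counter LVT
 * entry makes counter overflow arrive as an NMI rather than a normal
 * interrupt, which is what lets oprofile sample code that runs with
 * interrupts disabled. The previous LVTPC value is saved so that
 * nmi_cpu_shutdown() can restore it.
 */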
static int nmi_setup(void)
{
        if (!allocate_msrs())
                return -ENOMEM;

        /* We need to be careful to install our NMI handler
         * without actually triggering any NMIs as this will
         * break the core code horrifically.
         */
        if (reserve_lapic_nmi() < 0) {
                free_msrs();
                return -EBUSY;
        }
        /* We need to serialize the save and setup phases for HT,
         * because the subsets of MSRs touched by the save and setup
         * operations are distinct.
         */
        on_each_cpu(nmi_save_registers, NULL, 0, 1);
        on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
        set_nmi_callback(nmi_callback);
        nmi_enabled = 1;
        return 0;
}
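/*
 * A note on the on_each_cpu() calls above: the final argument, 1,
 * asks on_each_cpu() to wait until every CPU has finished running the
 * function. That waiting is what serializes the save pass against the
 * setup pass - nmi_cpu_setup() does not start anywhere until
 * nmi_save_registers() has completed everywhere.
 */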
static void nmi_restore_registers(struct op_msrs * msrs)
{
        unsigned int const nr_ctrs = model->num_counters;
        unsigned int const nr_ctrls = model->num_controls;
        struct op_msr * counters = msrs->counters;
        struct op_msr * controls = msrs->controls;
        unsigned int i;

        for (i = 0; i < nr_ctrls; ++i) {
                wrmsr(controls[i].addr,
                      controls[i].saved.low,
                      controls[i].saved.high);
        }

        for (i = 0; i < nr_ctrs; ++i) {
                wrmsr(counters[i].addr,
                      counters[i].saved.low,
                      counters[i].saved.high);
        }
}
static void nmi_cpu_shutdown(void * dummy)
{
        unsigned int v;
        int cpu = smp_processor_id();
        struct op_msrs * msrs = &cpu_msrs[cpu];

        /* Restoring APIC_LVTPC can trigger an APIC error because the
         * delivery mode and vector number combination can be illegal.
         * That's by design: on power-on the APIC LVT entries contain a
         * zero vector number, which is legal only for NMI delivery
         * mode. So inhibit APIC errors before restoring the LVTPC.
         */
        v = apic_read(APIC_LVTERR);
        apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
        apic_write(APIC_LVTPC, saved_lvtpc[cpu]);
        apic_write(APIC_LVTERR, v);
        nmi_restore_registers(msrs);
}
static void nmi_shutdown(void)
{
        nmi_enabled = 0;
        on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
        unset_nmi_callback();
        release_lapic_nmi();
        free_msrs();
}
static void nmi_cpu_start(void * dummy)
{
        struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
        model->start(msrs);
}
static int nmi_start(void)
{
        on_each_cpu(nmi_cpu_start, NULL, 0, 1);
        return 0;
}
static void nmi_cpu_stop(void * dummy)
{
        struct op_msrs const * msrs = &cpu_msrs[smp_processor_id()];
        model->stop(msrs);
}
static void nmi_stop(void)
{
        on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
}
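/*
 * start/stop deliberately touch only the counter enable state via
 * model->start()/model->stop(); allocation, MSR saving and NMI
 * handler registration all live in setup/shutdown, so the oprofile
 * core can toggle collection cheaply while profiling stays set up.
 */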
struct op_counter_config counter_config[OP_MAX_COUNTER];

static int nmi_create_files(struct super_block * sb, struct dentry * root)
{
        unsigned int i;

        for (i = 0; i < model->num_counters; ++i) {
                struct dentry * dir;
                char buf[2];

                snprintf(buf, 2, "%d", i);
                dir = oprofilefs_mkdir(sb, root, buf);
                oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
                oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
                oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
                oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
                oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
                oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
        }

        return 0;
}
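/*
 * For a two-counter model the resulting oprofilefs tree
 * (conventionally mounted at /dev/oprofile) looks roughly like:
 *
 *      0/enabled  0/event  0/count  0/unit_mask  0/kernel  0/user
 *      1/enabled  1/event  1/count  1/unit_mask  1/kernel  1/user
 *
 * which is what the userspace opcontrol tooling reads and writes.
 */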
static int __init p4_init(char ** cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

#ifndef CONFIG_SMP
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;
#else
        switch (smp_num_siblings) {
                case 1:
                        *cpu_type = "i386/p4";
                        model = &op_p4_spec;
                        return 1;

                case 2:
                        *cpu_type = "i386/p4-ht";
                        model = &op_p4_ht2_spec;
                        return 1;
        }
#endif

        printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
        printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
        return 0;
}
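/*
 * Background (not from this file): hyperthreaded siblings on a P4
 * share the physical performance counter MSRs, so op_p4_ht2_spec is
 * presumably a model description that partitions the counters between
 * the two logical CPUs; no such partitioning exists for more than two
 * siblings, hence the reversion to timer mode above.
 */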
static int __init ppro_init(char ** cpu_type)
{
        __u8 cpu_model = boot_cpu_data.x86_model;

        if (cpu_model == 9) {
                *cpu_type = "i386/p6_mobile";
        } else if (cpu_model > 5) {
                *cpu_type = "i386/piii";
        } else if (cpu_model > 2) {
                *cpu_type = "i386/pii";
        } else {
                *cpu_type = "i386/ppro";
        }

        model = &op_ppro_spec;
        return 1;
}
/* set when init_driverfs() has run, so op_nmi_exit() tears down the
 * driverfs entries only if op_nmi_init() actually registered them */
static int using_nmi;
int __init op_nmi_init(struct oprofile_operations *ops)
{
        __u8 vendor = boot_cpu_data.x86_vendor;
        __u8 family = boot_cpu_data.x86;
        char *cpu_type;

        if (!cpu_has_apic)
                return -ENODEV;

        switch (vendor) {
                case X86_VENDOR_AMD:
                        /* Needs to be at least an Athlon (or hammer
                         * in 32bit mode) */
                        switch (family) {
                        default:
                                return -ENODEV;
                        case 6:
                                model = &op_athlon_spec;
                                cpu_type = "i386/athlon";
                                break;
                        case 0xf:
                                model = &op_athlon_spec;
                                /* Actually it could be i386/hammer
                                 * too, but give user space a
                                 * consistent name. */
                                cpu_type = "x86-64/hammer";
                                break;
                        }
                        break;

                case X86_VENDOR_INTEL:
                        switch (family) {
                                /* Pentium IV */
                                case 0xf:
                                        if (!p4_init(&cpu_type))
                                                return -ENODEV;
                                        break;

                                /* A P6-class processor */
                                case 6:
                                        if (!ppro_init(&cpu_type))
                                                return -ENODEV;
                                        break;

                                default:
                                        return -ENODEV;
                        }
                        break;

                default:
                        return -ENODEV;
        }

        init_driverfs();
        using_nmi = 1;
        ops->create_files = nmi_create_files;
        ops->setup = nmi_setup;
        ops->shutdown = nmi_shutdown;
        ops->start = nmi_start;
        ops->stop = nmi_stop;
        ops->cpu_type = cpu_type;
        printk(KERN_INFO "oprofile: using NMI interrupt.\n");
        return 0;
}
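/*
 * On any -ENODEV return above, the arch oprofile init code is
 * expected to fall back to the timer-interrupt based profiler; this
 * function only wires in the NMI implementation when the CPU is
 * recognised and a local APIC is present.
 */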
void op_nmi_exit(void)
{
        if (using_nmi)
                exit_driverfs();
}