/* local apic based NMI watchdog for various CPUs.
   This file also handles reservation of performance counters for coordination
   with other users (like oprofile).

   Note that these events normally don't tick when the CPU idles. This means
   the frequency varies with CPU load.

   Original code for K7/P6 written by Keith Owens */

#include <linux/percpu.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/apic.h>
#include <asm/intel_arch_perfmon.h>

struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
        unsigned int perfctr_msr;  /* the MSR to reset in NMI handler */
        unsigned int evntsel_msr;  /* the MSR to select the events to handle */
};

/* Interface defining a CPU specific perfctr watchdog */
struct wd_ops {
        int (*reserve)(void);
        void (*unreserve)(void);
        int (*setup)(unsigned nmi_hz);
        void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
        void (*stop)(void *);
        unsigned perfctr;
        unsigned evntsel;
        u64 checkbit;
};

static struct wd_ops *wd_ops;

/* this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.  It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66

/* perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters and event selection registers may be
 * reserved by different subsystems; this reservation system just tries to
 * coordinate things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk);

/* converts a perfctr msr to its reservation bit in perfctr_nmi_owner */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
        return wd_ops ? msr - wd_ops->perfctr : 0;
}

/* converts an event selection msr to its reservation bit in evntsel_nmi_owner */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
        return wd_ops ? msr - wd_ops->evntsel : 0;
}

/* checks for a bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}

/* checks an msr for availability */
int avail_to_resrv_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        return (!test_bit(counter, perfctr_nmi_owner));
}

int reserve_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        if (!test_and_set_bit(counter, perfctr_nmi_owner))
                return 1;
        return 0;
}

void release_perfctr_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_perfctr_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        clear_bit(counter, perfctr_nmi_owner);
}

int reserve_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        if (!test_and_set_bit(counter, evntsel_nmi_owner))
                return 1;
        return 0;
}

void release_evntsel_nmi(unsigned int msr)
{
        unsigned int counter;

        counter = nmi_evntsel_msr_to_bit(msr);
        BUG_ON(counter > NMI_MAX_COUNTER_BITS);

        clear_bit(counter, evntsel_nmi_owner);
}

EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi);
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
EXPORT_SYMBOL(reserve_perfctr_nmi);
EXPORT_SYMBOL(release_perfctr_nmi);
EXPORT_SYMBOL(reserve_evntsel_nmi);
EXPORT_SYMBOL(release_evntsel_nmi);
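
/*
 * Illustrative sketch only (not lifted from any real caller): another
 * perfctr user such as oprofile is expected to reserve both the counter
 * and its event selection MSR before touching them, and to release them
 * again when done, e.g.:
 *
 *         if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0))
 *                 return -EBUSY;
 *         if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0)) {
 *                 release_perfctr_nmi(MSR_K7_PERFCTR0);
 *                 return -EBUSY;
 *         }
 *         ... program the counter ...
 *         release_evntsel_nmi(MSR_K7_EVNTSEL0);
 *         release_perfctr_nmi(MSR_K7_PERFCTR0);
 */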

void disable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        if (atomic_read(&nmi_active) <= 0)
                return;

        on_each_cpu(wd_ops->stop, NULL, 0, 1);
        wd_ops->unreserve();

        BUG_ON(atomic_read(&nmi_active) != 0);
}

void enable_lapic_nmi_watchdog(void)
{
        BUG_ON(nmi_watchdog != NMI_LOCAL_APIC);

        /* are we already enabled */
        if (atomic_read(&nmi_active) != 0)
                return;

        /* are we lapic aware */
        if (!wd_ops)
                return;
        if (!wd_ops->reserve()) {
                printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n");
                return;
        }

        on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1);
        touch_nmi_watchdog();
}

/*
 * Activate the NMI watchdog via the local APIC.
 */

static unsigned int adjust_for_32bit_ctr(unsigned int hz)
{
        u64 counter_val;
        unsigned int retval = hz;

        /*
         * On Intel CPUs with P6/ARCH_PERFMON only 32 bits in the counter
         * are writable, with the higher bits sign extending from bit 31.
         * So we can only program the counter with 31 bit values; the
         * 32nd bit must be 1 so that the sign extension fills the upper
         * bits with 1s as well.
         * Raise nmi_hz if necessary so the counter period fits.
         */
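        /*
         * Worked example with assumed numbers: at cpu_khz = 2200000
         * (a 2.2 GHz CPU) and nmi_hz = 1, counter_val is about 2.2e9,
         * which exceeds 0x7fffffff, so retval is bumped to
         * 2.2e9 / 0x7fffffff + 1 = 2 NMIs per second.
         */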
        counter_val = (u64)cpu_khz * 1000;
        do_div(counter_val, retval);
        if (counter_val > 0x7fffffffULL) {
                u64 count = (u64)cpu_khz * 1000;
                do_div(count, 0x7fffffffUL);
                retval = count + 1;
        }
        return retval;
}

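/*
 * Program the perfctr so that it overflows (and raises an NMI) roughly
 * nmi_hz times per second at full CPU load: write the negated event
 * count for one period, i.e. -(cpu_khz * 1000 / nmi_hz).
 */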
static void
write_watchdog_counter(unsigned int perfctr_msr, const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsrl(perfctr_msr, 0 - count);
}

static void write_watchdog_counter32(unsigned int perfctr_msr,
                const char *descr, unsigned nmi_hz)
{
        u64 count = (u64)cpu_khz * 1000;

        do_div(count, nmi_hz);
        if (descr)
                Dprintk("setting %s to -0x%08Lx\n", descr, count);
        wrmsr(perfctr_msr, (u32)(-count), 0);
}

/* AMD K7/K8/Family10h/Family11h support. AMD keeps this interface
   nicely stable so there is not much variety */

#define K7_EVNTSEL_ENABLE       (1 << 22)
#define K7_EVNTSEL_INT          (1 << 20)
#define K7_EVNTSEL_OS           (1 << 17)
#define K7_EVNTSEL_USR          (1 << 16)
#define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING    0x76
#define K7_NMI_EVENT            K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING

static int setup_k7_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = MSR_K7_PERFCTR0;
        evntsel_msr = MSR_K7_EVNTSEL0;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = K7_EVNTSEL_INT
                | K7_EVNTSEL_OS
                | K7_EVNTSEL_USR
                | K7_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= K7_EVNTSEL_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        return 1;
}

static void single_msr_stop_watchdog(void *arg)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        wrmsr(wd->evntsel_msr, 0, 0);
}

static int single_msr_reserve(void)
{
        if (!reserve_perfctr_nmi(wd_ops->perfctr))
                return 0;

        if (!reserve_evntsel_nmi(wd_ops->evntsel)) {
                release_perfctr_nmi(wd_ops->perfctr);
                return 0;
        }
        return 1;
}

static void single_msr_unreserve(void)
{
        release_evntsel_nmi(wd_ops->evntsel);
        release_perfctr_nmi(wd_ops->perfctr);
}

static void single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static struct wd_ops k7_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_k7_watchdog,
        .rearm = single_msr_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_K7_PERFCTR0,
        .evntsel = MSR_K7_EVNTSEL0,
        .checkbit = 1ULL<<63,
};

/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */

#define P6_EVNTSEL0_ENABLE      (1 << 22)
#define P6_EVNTSEL_INT          (1 << 20)
#define P6_EVNTSEL_OS           (1 << 17)
#define P6_EVNTSEL_USR          (1 << 16)
#define P6_EVENT_CPU_CLOCKS_NOT_HALTED  0x79
#define P6_NMI_EVENT            P6_EVENT_CPU_CLOCKS_NOT_HALTED

static int setup_p6_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        perfctr_msr = MSR_P6_PERFCTR0;
        evntsel_msr = MSR_P6_EVNTSEL0;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = P6_EVNTSEL_INT
                | P6_EVNTSEL_OS
                | P6_EVNTSEL_USR
                | P6_NMI_EVENT;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= P6_EVNTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        return 1;
}

static void p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        /* P6 based Pentium M needs to re-unmask
         * the apic vector but it doesn't hurt
         * other P6 variants.
         * ArchPerfmon/Core Duo also needs this */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* P6/ARCH_PERFMON has 32 bit counter write */
        write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
}

static struct wd_ops p6_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_p6_watchdog,
        .rearm = p6_rearm,
        .stop = single_msr_stop_watchdog,
        .perfctr = MSR_P6_PERFCTR0,
        .evntsel = MSR_P6_EVNTSEL0,
        .checkbit = 1ULL<<39,
};

/* Intel P4 performance counters. By far the most complicated of all. */

#define MSR_P4_MISC_ENABLE_PERF_AVAIL   (1<<7)
#define P4_ESCR_EVENT_SELECT(N) ((N)<<25)
#define P4_ESCR_OS              (1<<3)
#define P4_ESCR_USR             (1<<2)
#define P4_CCCR_OVF_PMI0        (1<<26)
#define P4_CCCR_OVF_PMI1        (1<<27)
#define P4_CCCR_THRESHOLD(N)    ((N)<<20)
#define P4_CCCR_COMPLEMENT      (1<<19)
#define P4_CCCR_COMPARE         (1<<18)
#define P4_CCCR_REQUIRED        (3<<16)
#define P4_CCCR_ESCR_SELECT(N)  ((N)<<13)
#define P4_CCCR_ENABLE          (1<<12)
#define P4_CCCR_OVF             (1<<31)

/* Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter
   CRU_ESCR0 (with any non-null event selector) through a complemented
   max threshold. [IA32-Vol3, Section 14.9.9] */
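/*
 * Rough sketch of why that works: with COMPARE and COMPLEMENT set and a
 * threshold of 15 (the maximum), the complemented comparison is true on
 * every cycle regardless of how many events CRU_ESCR0 actually reports,
 * so IQ_COUNTER0 increments once per clock and can serve as a timebase.
 */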

static int setup_p4_watchdog(unsigned nmi_hz)
{
        unsigned int perfctr_msr, evntsel_msr, cccr_msr;
        unsigned int evntsel, cccr_val;
        unsigned int misc_enable, dummy;
        unsigned int ht_num;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy);
        if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL))
                return 0;

#ifdef CONFIG_SMP
        /* detect which hyperthread we are on */
        if (smp_num_siblings == 2) {
                unsigned int ebx, apicid;

                ebx = cpuid_ebx(1);
                apicid = (ebx >> 24) & 0xff;
                ht_num = apicid & 1;
        } else
#endif
                ht_num = 0;

        /* performance counters are shared resources
         * assign each hyperthread its own set
         * (re-use the ESCR0 register, seems safe
         * and keeps the cccr_val the same)
         */
        if (!ht_num) {
                /* logical cpu 0 */
                perfctr_msr = MSR_P4_IQ_PERFCTR0;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR0;
                cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4);
        } else {
                /* logical cpu 1 */
                perfctr_msr = MSR_P4_IQ_PERFCTR1;
                evntsel_msr = MSR_P4_CRU_ESCR0;
                cccr_msr = MSR_P4_IQ_CCCR1;
                cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4);
        }

        evntsel = P4_ESCR_EVENT_SELECT(0x3F)
                | P4_ESCR_OS
                | P4_ESCR_USR;

        cccr_val |= P4_CCCR_THRESHOLD(15)
                 | P4_CCCR_COMPLEMENT
                 | P4_CCCR_COMPARE
                 | P4_CCCR_REQUIRED;

        wrmsr(evntsel_msr, evntsel, 0);
        wrmsr(cccr_msr, cccr_val, 0);
        write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        cccr_val |= P4_CCCR_ENABLE;
        wrmsr(cccr_msr, cccr_val, 0);
        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = cccr_msr;
        return 1;
}

static void stop_p4_watchdog(void *arg)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        wrmsr(wd->cccr_msr, 0, 0);
        wrmsr(wd->evntsel_msr, 0, 0);
}

static int p4_reserve(void)
{
        if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0))
                return 0;
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1))
                goto fail1;
#endif
        if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0))
                goto fail2;
        /* RED-PEN why is ESCR1 not reserved here? */
        return 1;
 fail2:
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
 fail1:
#endif
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
        return 0;
}

static void p4_unreserve(void)
{
#ifdef CONFIG_SMP
        if (smp_num_siblings > 1)
                release_perfctr_nmi(MSR_P4_IQ_PERFCTR1);
#endif
        release_perfctr_nmi(MSR_P4_IQ_PERFCTR0);
        release_evntsel_nmi(MSR_P4_CRU_ESCR0);
}

static void p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
{
        unsigned dummy;
        /*
         * P4 quirks:
         * - An overflown perfctr will assert its interrupt
         *   until the OVF flag in its CCCR is cleared.
         * - LVTPC is masked on interrupt and must be
         *   unmasked by the LVTPC handler.
         */
        rdmsrl(wd->cccr_msr, dummy);
        dummy &= ~P4_CCCR_OVF;
        wrmsrl(wd->cccr_msr, dummy);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        /* start the cycle over again */
        write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz);
}

static struct wd_ops p4_wd_ops = {
        .reserve = p4_reserve,
        .unreserve = p4_unreserve,
        .setup = setup_p4_watchdog,
        .rearm = p4_rearm,
        .stop = stop_p4_watchdog,
        /* RED-PEN this is wrong for the other sibling */
        .perfctr = MSR_P4_BPU_PERFCTR0,
        .evntsel = MSR_P4_BSU_ESCR0,
        .checkbit = 1ULL<<39,
};

/* Watchdog using the Intel architected PerfMon. Used for Core2 and hopefully
   all future Intel CPUs. */

#define ARCH_PERFMON_NMI_EVENT_SEL      ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
#define ARCH_PERFMON_NMI_EVENT_UMASK    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK

static int setup_intel_arch_watchdog(unsigned nmi_hz)
{
        unsigned int ebx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int perfctr_msr, evntsel_msr;
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);

        /*
         * Check whether the Architectural PerfMon supports
         * Unhalted Core Cycles Event or not.
         * NOTE: Corresponding bit = 0 in ebx indicates event present.
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
            (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;

        perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
        evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;

        wrmsrl(perfctr_msr, 0UL);

        evntsel = ARCH_PERFMON_EVENTSEL_INT
                | ARCH_PERFMON_EVENTSEL_OS
                | ARCH_PERFMON_EVENTSEL_USR
                | ARCH_PERFMON_NMI_EVENT_SEL
                | ARCH_PERFMON_NMI_EVENT_UMASK;

        /* setup the timer */
        wrmsr(evntsel_msr, evntsel, 0);
        nmi_hz = adjust_for_32bit_ctr(nmi_hz);
        write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz);
        apic_write(APIC_LVTPC, APIC_DM_NMI);
        evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsr(evntsel_msr, evntsel, 0);

        wd->perfctr_msr = perfctr_msr;
        wd->evntsel_msr = evntsel_msr;
        wd->cccr_msr = 0;  /* unused */
        wd_ops->checkbit = 1ULL << (eax.split.bit_width - 1);
        return 1;
}

static struct wd_ops intel_arch_wd_ops = {
        .reserve = single_msr_reserve,
        .unreserve = single_msr_unreserve,
        .setup = setup_intel_arch_watchdog,
        .rearm = p6_rearm,
        .stop = single_msr_stop_watchdog,
        /* setup_intel_arch_watchdog() programs counter/evntsel 1,
           so reserve those MSRs, not counter 0 */
        .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
        .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
};
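
/*
 * Note that intel_arch_wd_ops.checkbit is deliberately left unset above;
 * setup_intel_arch_watchdog() fills it in from the counter bit width
 * reported by CPUID leaf 0xa, which differs between implementations.
 */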

static void probe_nmi_watchdog(void)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
                if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
                    boot_cpu_data.x86 != 16)
                        return;
                wd_ops = &k7_wd_ops;
                break;
        case X86_VENDOR_INTEL:
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        wd_ops = &intel_arch_wd_ops;
                        break;
                }
                switch (boot_cpu_data.x86) {
                case 6:
                        if (boot_cpu_data.x86_model > 0xd)
                                return;

                        wd_ops = &p6_wd_ops;
                        break;
                case 15:
                        if (boot_cpu_data.x86_model > 0x4)
                                return;

                        wd_ops = &p4_wd_ops;
                        break;
                default:
                        return;
                }
                break;
        }
}

/* Interface to nmi.c */
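/*
 * Rough calling sequence as used by nmi.c: lapic_watchdog_init() is run
 * on each CPU to probe and program a counter, lapic_wd_event() is called
 * from the NMI handler to check for and rearm an expired counter,
 * lapic_adjust_nmi_hz() clamps the requested frequency for 32 bit
 * counters, and lapic_watchdog_stop() shuts the counter down again.
 */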

int lapic_watchdog_init(unsigned nmi_hz)
{
        if (!wd_ops) {
                probe_nmi_watchdog();
                if (!wd_ops)
                        return -1;
        }

        if (!(wd_ops->setup(nmi_hz))) {
                printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n",
                       raw_smp_processor_id());
                return -1;
        }

        return 0;
}

void lapic_watchdog_stop(void)
{
        if (wd_ops)
                wd_ops->stop(NULL);
}

unsigned lapic_adjust_nmi_hz(unsigned hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        if (wd->perfctr_msr == MSR_P6_PERFCTR0 ||
            wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
                hz = adjust_for_32bit_ctr(hz);
        return hz;
}

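/*
 * Called from the NMI handler: returns 1 and rearms the counter if our
 * perfctr overflowed (i.e. this NMI belongs to the watchdog), 0 if the
 * counter is still running and the NMI came from somewhere else.
 */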
int lapic_wd_event(unsigned nmi_hz)
{
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
        u64 ctr;
        rdmsrl(wd->perfctr_msr, ctr);
        if (ctr & wd_ops->checkbit) { /* perfctr still running? */
                return 0;
        }
        wd_ops->rearm(wd, nmi_hz);
        return 1;
}

int lapic_watchdog_ok(void)
{
        return wd_ops != NULL;
}