/**
 * P4 model-specific MSR operations
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Graydon Hoare
 */
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/fixmap.h>
#include <asm/msr.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "op_x86_model.h"
#include "op_counter.h"
#define NUM_EVENTS 39

#define NUM_COUNTERS_NON_HT 8
#define NUM_ESCRS_NON_HT 45
#define NUM_CCCRS_NON_HT 18
#define NUM_CONTROLS_NON_HT (NUM_ESCRS_NON_HT + NUM_CCCRS_NON_HT)

#define NUM_COUNTERS_HT2 4
#define NUM_ESCRS_HT2 23
#define NUM_CCCRS_HT2 9
#define NUM_CONTROLS_HT2 (NUM_ESCRS_HT2 + NUM_CCCRS_HT2)
static unsigned int num_counters = NUM_COUNTERS_NON_HT;
static unsigned int num_controls = NUM_CONTROLS_NON_HT;
/* this has to be checked dynamically since the
   hyper-threadedness of a chip is discovered at
   boot time. */
static inline void setup_num_counters(void)
{
        if (smp_num_siblings == 2) {
                num_counters = NUM_COUNTERS_HT2;
                num_controls = NUM_CONTROLS_HT2;
        }
}
static inline int addr_increment(void)
{
        return smp_num_siblings == 2 ? 2 : 1;
}
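
/*
 * Note: with hyper-threading, this module splits each even/odd MSR pair
 * (e.g. MSR_P4_BSU_ESCR0/MSR_P4_BSU_ESCR1) between the two sibling
 * threads: the "even" thread starts at the pair's base address, the
 * "odd" thread one register higher, and both step by addr_increment()
 * so that each walks only its own half of the register file (see
 * get_stagger() below).
 */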
/* tables to simulate simplified hardware view of p4 registers */
struct p4_counter_binding {
        int virt_counter;
        int counter_address;
        int cccr_address;
};

struct p4_event_binding {
        int escr_select;  /* value to put in CCCR */
        int event_select; /* value to put in ESCR */
        struct {
                int virt_counter; /* for this counter... */
                int escr_address; /* use this ESCR       */
        } bindings[2];
};
/* nb: these CTR_* defines are a duplicate of defines in
   event/i386.p4*events. */

#define CTR_BPU_0      (1 << 0)
#define CTR_MS_0       (1 << 1)
#define CTR_FLAME_0    (1 << 2)
#define CTR_IQ_4       (1 << 3)
#define CTR_BPU_2      (1 << 4)
#define CTR_MS_2       (1 << 5)
#define CTR_FLAME_2    (1 << 6)
#define CTR_IQ_5       (1 << 7)
static struct p4_counter_binding p4_counters[NUM_COUNTERS_NON_HT] = {
        { CTR_BPU_0,   MSR_P4_BPU_PERFCTR0,   MSR_P4_BPU_CCCR0 },
        { CTR_MS_0,    MSR_P4_MS_PERFCTR0,    MSR_P4_MS_CCCR0 },
        { CTR_FLAME_0, MSR_P4_FLAME_PERFCTR0, MSR_P4_FLAME_CCCR0 },
        { CTR_IQ_4,    MSR_P4_IQ_PERFCTR4,    MSR_P4_IQ_CCCR4 },
        { CTR_BPU_2,   MSR_P4_BPU_PERFCTR2,   MSR_P4_BPU_CCCR2 },
        { CTR_MS_2,    MSR_P4_MS_PERFCTR2,    MSR_P4_MS_CCCR2 },
        { CTR_FLAME_2, MSR_P4_FLAME_PERFCTR2, MSR_P4_FLAME_CCCR2 },
        { CTR_IQ_5,    MSR_P4_IQ_PERFCTR5,    MSR_P4_IQ_CCCR5 }
};
#define NUM_UNUSED_CCCRS (NUM_CCCRS_NON_HT - NUM_COUNTERS_NON_HT)
/* p4 event codes in libop/op_event.h are indices into this table. */

static struct p4_event_binding p4_events[NUM_EVENTS] = {
        { /* BRANCH_RETIRED */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },

        { /* MISPRED_BRANCH_RETIRED */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },

        { /* TC_DELIVER_MODE */
                { { CTR_MS_0, MSR_P4_TC_ESCR0},
                  { CTR_MS_2, MSR_P4_TC_ESCR1} }
        },

        { /* BPU_FETCH_REQUEST */
                { { CTR_BPU_0, MSR_P4_BPU_ESCR0},
                  { CTR_BPU_2, MSR_P4_BPU_ESCR1} }
        },

        { /* ITLB_REFERENCE */
                { { CTR_BPU_0, MSR_P4_ITLB_ESCR0},
                  { CTR_BPU_2, MSR_P4_ITLB_ESCR1} }
        },

        { /* MEMORY_CANCEL */
                { { CTR_FLAME_0, MSR_P4_DAC_ESCR0},
                  { CTR_FLAME_2, MSR_P4_DAC_ESCR1} }
        },

        { /* MEMORY_COMPLETE */
                { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
                  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
        },

        { /* LOAD_PORT_REPLAY */
                { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
                  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
        },

        { /* STORE_PORT_REPLAY */
                { { CTR_FLAME_0, MSR_P4_SAAT_ESCR0},
                  { CTR_FLAME_2, MSR_P4_SAAT_ESCR1} }
        },

        { /* MOB_LOAD_REPLAY */
                { { CTR_BPU_0, MSR_P4_MOB_ESCR0},
                  { CTR_BPU_2, MSR_P4_MOB_ESCR1} }
        },

        { /* PAGE_WALK_TYPE */
                { { CTR_BPU_0, MSR_P4_PMH_ESCR0},
                  { CTR_BPU_2, MSR_P4_PMH_ESCR1} }
        },

        { /* BSQ_CACHE_REFERENCE */
                { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
                  { CTR_BPU_2, MSR_P4_BSU_ESCR1} }
        },

        { /* IOQ_ALLOCATION */
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { 0, 0 } }
        },
        { /* IOQ_ACTIVE_ENTRIES */
                { { CTR_BPU_2, MSR_P4_FSB_ESCR1},
                  { 0, 0 } }
        },

        { /* FSB_DATA_ACTIVITY */
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
        },

        { /* BSQ_ALLOCATION */
                { { CTR_BPU_0, MSR_P4_BSU_ESCR0},
                  { 0, 0 } }
        },

        { /* BSQ_ACTIVE_ENTRIES */
                { { CTR_BPU_2, MSR_P4_BSU_ESCR1 /* guess */},
                  { 0, 0 } }
        },

        { /* X87_ASSIST */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },

        { /* SSE_INPUT_ASSIST */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* PACKED_SP_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* PACKED_DP_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* SCALAR_SP_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* SCALAR_DP_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* 64BIT_MMX_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* 128BIT_MMX_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* X87_FP_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* X87_SIMD_MOVES_UOP */
                { { CTR_FLAME_0, MSR_P4_FIRM_ESCR0},
                  { CTR_FLAME_2, MSR_P4_FIRM_ESCR1} }
        },

        { /* MACHINE_CLEAR */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },
        { /* GLOBAL_POWER_EVENTS */
                0x06, 0x13 /* older manual says 0x05, newer 0x13 */,
                { { CTR_BPU_0, MSR_P4_FSB_ESCR0},
                  { CTR_BPU_2, MSR_P4_FSB_ESCR1} }
        },

        { /* TC_MS_XFER */
                { { CTR_MS_0, MSR_P4_MS_ESCR0},
                  { CTR_MS_2, MSR_P4_MS_ESCR1} }
        },

        { /* UOP_QUEUE_WRITES */
                { { CTR_MS_0, MSR_P4_MS_ESCR0},
                  { CTR_MS_2, MSR_P4_MS_ESCR1} }
        },

        { /* FRONT_END_EVENT */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },

        { /* EXECUTION_EVENT */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },

        { /* REPLAY_EVENT */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR2},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR3} }
        },

        { /* INSTR_RETIRED */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },

        { /* UOPS_RETIRED */
                { { CTR_IQ_4, MSR_P4_CRU_ESCR0},
                  { CTR_IQ_5, MSR_P4_CRU_ESCR1} }
        },

        { /* UOP_TYPE */
                { { CTR_IQ_4, MSR_P4_RAT_ESCR0},
                  { CTR_IQ_5, MSR_P4_RAT_ESCR1} }
        },

        { /* RETIRED_MISPRED_BRANCH_TYPE */
                { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
                  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
        },

        { /* RETIRED_BRANCH_TYPE */
                { { CTR_MS_0, MSR_P4_TBPU_ESCR0},
                  { CTR_MS_2, MSR_P4_TBPU_ESCR1} }
        }
};
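
/* bit 7 of MSR_IA32_MISC_ENABLE is the read-only "performance monitoring
   available" flag; if it is clear, there are no usable PMCs. */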
#define MISC_PMC_ENABLED_P(x) ((x) & (1 << 7))
#define ESCR_RESERVED_BITS 0x80000003
#define ESCR_CLEAR(escr) ((escr) &= ESCR_RESERVED_BITS)
#define ESCR_SET_USR_0(escr, usr) ((escr) |= (((usr) & 1) << 2))
#define ESCR_SET_OS_0(escr, os) ((escr) |= (((os) & 1) << 3))
#define ESCR_SET_USR_1(escr, usr) ((escr) |= ((usr) & 1))
#define ESCR_SET_OS_1(escr, os) ((escr) |= (((os) & 1) << 1))
#define ESCR_SET_EVENT_SELECT(escr, sel) ((escr) |= (((sel) & 0x3f) << 25))
#define ESCR_SET_EVENT_MASK(escr, mask) ((escr) |= (((mask) & 0xffff) << 9))
#define ESCR_READ(escr, high, ev, i) do { rdmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
#define ESCR_WRITE(escr, high, ev, i) do { wrmsr(ev->bindings[(i)].escr_address, (escr), (high)); } while (0)
#define CCCR_RESERVED_BITS 0x38030FFF
#define CCCR_CLEAR(cccr) ((cccr) &= CCCR_RESERVED_BITS)
#define CCCR_SET_REQUIRED_BITS(cccr) ((cccr) |= 0x00030000)
#define CCCR_SET_ESCR_SELECT(cccr, sel) ((cccr) |= (((sel) & 0x07) << 13))
#define CCCR_SET_PMI_OVF_0(cccr) ((cccr) |= (1 << 26))
#define CCCR_SET_PMI_OVF_1(cccr) ((cccr) |= (1 << 27))
#define CCCR_SET_ENABLE(cccr) ((cccr) |= (1 << 12))
#define CCCR_SET_DISABLE(cccr) ((cccr) &= ~(1 << 12))
#define CCCR_READ(low, high, i) do { rdmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_WRITE(low, high, i) do { wrmsr(p4_counters[(i)].cccr_address, (low), (high)); } while (0)
#define CCCR_OVF_P(cccr) ((cccr) & (1U << 31))
#define CCCR_CLEAR_OVF(cccr) ((cccr) &= ~(1U << 31))
#define CTRL_IS_RESERVED(msrs, c) (msrs->controls[(c)].addr ? 1 : 0)
#define CTR_IS_RESERVED(msrs, c) (msrs->counters[(c)].addr ? 1 : 0)
#define CTR_READ(l, h, i) do { rdmsr(p4_counters[(i)].counter_address, (l), (h)); } while (0)
#define CTR_WRITE(l, i) do { wrmsr(p4_counters[(i)].counter_address, -(u32)(l), -1); } while (0)
#define CTR_OVERFLOW_P(ctr) (!((ctr) & 0x80000000))
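
/*
 * Note on the two macros above: CTR_WRITE loads the counter with the
 * two's-complement *negation* of the reset count.  P4 counters count
 * up, so a counter loaded with -count overflows after exactly `count`
 * events.  CTR_OVERFLOW_P then treats a cleared bit 31 in the low half
 * as "wrapped past zero", which catches overflows whose CCCR OVF flag
 * was never latched (see the errata discussion in p4_check_ctrs()
 * below).
 */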
/* this assigns a "stagger" to the current CPU, which is used throughout
   the code in this module as an extra array offset, to select the "even"
   or "odd" part of all the divided resources. */
static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
#endif
	return 0;
}
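
/*
 * For example, assuming a typical HT topology in which CPUs 0 and 2 are
 * siblings: first_cpu() of the sibling map is 0 for both, so CPU 0 gets
 * stagger 0 (the "even" half) and CPU 2 gets stagger 1 (the "odd" half).
 * On non-SMP kernels the stagger is always 0.
 */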
/* finally, mediate access to a real hardware counter
   by passing a "virtual" counter number to this macro,
   along with your stagger setting. */
#define VIRT_CTR(stagger, i) ((i) + ((num_counters) * (stagger)))
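
/*
 * Worked example: on an HT chip num_counters is 4, so the "odd" thread
 * (stagger 1) asking for virtual counter 0 gets VIRT_CTR(1, 0) == 4,
 * which indexes the CTR_BPU_2/MSR_P4_BPU_PERFCTR2 entry of p4_counters[]
 * above, i.e. the sibling's copy of the same physical counter class.
 */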
static unsigned long reset_value[NUM_COUNTERS_NON_HT];
static void p4_fill_in_addresses(struct op_msrs * const msrs)
{
	unsigned int i;
	unsigned int addr, cccraddr, stag;

	setup_num_counters();
	stag = get_stagger();

	/* initialize some registers */
	for (i = 0; i < num_counters; ++i) {
		msrs->counters[i].addr = 0;
	}
	for (i = 0; i < num_controls; ++i) {
		msrs->controls[i].addr = 0;
	}

	/* the counter & cccr registers we pay attention to */
	for (i = 0; i < num_counters; ++i) {
		addr = p4_counters[VIRT_CTR(stag, i)].counter_address;
		cccraddr = p4_counters[VIRT_CTR(stag, i)].cccr_address;
		if (reserve_perfctr_nmi(addr)) {
			msrs->counters[i].addr = addr;
			msrs->controls[i].addr = cccraddr;
		}
	}

	/* 43 ESCR registers in three or four discontiguous groups */
	for (addr = MSR_P4_BSU_ESCR0 + stag;
	     addr < MSR_P4_IQ_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* no IQ_ESCR0/1 on some models, we save BSU_ESCR0/1 a second time
	 * to avoid a special case in nmi_{save|restore}_registers() */
	if (boot_cpu_data.x86_model >= 0x3) {
		for (addr = MSR_P4_BSU_ESCR0 + stag;
		     addr <= MSR_P4_BSU_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	} else {
		for (addr = MSR_P4_IQ_ESCR0 + stag;
		     addr <= MSR_P4_IQ_ESCR1; ++i, addr += addr_increment()) {
			if (reserve_evntsel_nmi(addr))
				msrs->controls[i].addr = addr;
		}
	}

	for (addr = MSR_P4_RAT_ESCR0 + stag;
	     addr <= MSR_P4_SSU_ESCR0; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_MS_ESCR0 + stag;
	     addr <= MSR_P4_TC_ESCR1; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	for (addr = MSR_P4_IX_ESCR0 + stag;
	     addr <= MSR_P4_CRU_ESCR3; ++i, addr += addr_increment()) {
		if (reserve_evntsel_nmi(addr))
			msrs->controls[i].addr = addr;
	}

	/* there are 2 remaining non-contiguously located ESCRs */

	if (num_counters == NUM_COUNTERS_NON_HT) {
		/* standard non-HT CPUs handle both remaining ESCRs */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else if (stag == 0) {
		/* HT CPUs give the first remainder to the even thread, as
		   the 32nd control register */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR4))
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR4;

	} else {
		/* and two copies of the second to the odd thread,
		   for the 22nd and 23rd control registers */
		if (reserve_evntsel_nmi(MSR_P4_CRU_ESCR5)) {
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
			msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
		}
	}
}
static void pmc_setup_one_p4_counter(unsigned int ctr)
{
	int i;
	int const maxbind = 2;
	unsigned int cccr = 0;
	unsigned int escr = 0;
	unsigned int high = 0;
	unsigned int counter_bit;
	struct p4_event_binding *ev = NULL;
	unsigned int stag;

	stag = get_stagger();

	/* convert from counter *number* to counter *bit* */
	counter_bit = 1 << VIRT_CTR(stag, ctr);

	/* find our event binding structure. */
	if (counter_config[ctr].event <= 0 || counter_config[ctr].event > NUM_EVENTS) {
		printk(KERN_ERR
		       "oprofile: P4 event code 0x%lx out of range\n",
		       counter_config[ctr].event);
		return;
	}

	ev = &(p4_events[counter_config[ctr].event - 1]);

	for (i = 0; i < maxbind; i++) {
		if (ev->bindings[i].virt_counter & counter_bit) {

			/* modify ESCR */
			ESCR_READ(escr, high, ev, i);
			ESCR_CLEAR(escr);
			if (stag == 0) {
				ESCR_SET_USR_0(escr, counter_config[ctr].user);
				ESCR_SET_OS_0(escr, counter_config[ctr].kernel);
			} else {
				ESCR_SET_USR_1(escr, counter_config[ctr].user);
				ESCR_SET_OS_1(escr, counter_config[ctr].kernel);
			}
			ESCR_SET_EVENT_SELECT(escr, ev->event_select);
			ESCR_SET_EVENT_MASK(escr, counter_config[ctr].unit_mask);
			ESCR_WRITE(escr, high, ev, i);

			/* modify CCCR */
			CCCR_READ(cccr, high, VIRT_CTR(stag, ctr));
			CCCR_CLEAR(cccr);
			CCCR_SET_REQUIRED_BITS(cccr);
			CCCR_SET_ESCR_SELECT(cccr, ev->escr_select);
			if (stag == 0)
				CCCR_SET_PMI_OVF_0(cccr);
			else
				CCCR_SET_PMI_OVF_1(cccr);
			CCCR_WRITE(cccr, high, VIRT_CTR(stag, ctr));
			return;
		}
	}

	printk(KERN_ERR
	       "oprofile: P4 event code 0x%lx no binding, stag %d ctr %d\n",
	       counter_config[ctr].event, stag, ctr);
}
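
/*
 * Illustration only (not part of the original driver): composing an ESCR
 * word for the "even" thread with the macros above.  The select and mask
 * values here are hypothetical placeholders, not entries from p4_events[].
 */
static inline unsigned int escr_compose_example(void)
{
	unsigned int escr = 0;

	ESCR_CLEAR(escr);                  /* keep only the reserved bits       */
	ESCR_SET_USR_0(escr, 1);           /* count in user mode (bit 2)        */
	ESCR_SET_OS_0(escr, 1);            /* count in kernel mode (bit 3)      */
	ESCR_SET_EVENT_SELECT(escr, 0x06); /* hypothetical select -> bits 25-30 */
	ESCR_SET_EVENT_MASK(escr, 0x0001); /* hypothetical mask   -> bits 9-24  */
	return escr;                       /* == 0x0c00020c                     */
}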
static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (i = num_counters; i < num_controls; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		wrmsr(msrs->controls[i].addr, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
static int p4_check_ctrs(struct pt_regs * const regs,
			 struct op_msrs const * const msrs)
{
	unsigned long ctr, low, high, stag, real;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {

		if (!reset_value[i])
			continue;

		/*
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in intel doc 249199-029, pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
static void p4_start(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_ENABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}
static void p4_stop(struct op_msrs const * const msrs)
{
	unsigned int low, high, stag;
	int i;

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		CCCR_READ(low, high, VIRT_CTR(stag, i));
		CCCR_SET_DISABLE(low);
		CCCR_WRITE(low, high, VIRT_CTR(stag, i));
	}
}
static void p4_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(msrs->counters[i].addr);
	}
	/* some of the control registers are specially reserved in
	 * conjunction with the counter registers (hence the starting offset).
	 * This saves a few bits. */
	for (i = num_counters; i < num_controls; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(msrs->controls[i].addr);
	}
}
struct op_x86_model_spec const op_p4_ht2_spec = {
	.num_counters = NUM_COUNTERS_HT2,
	.num_controls = NUM_CONTROLS_HT2,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop,
	.shutdown = &p4_shutdown
};
struct op_x86_model_spec const op_p4_spec = {
	.num_counters = NUM_COUNTERS_NON_HT,
	.num_controls = NUM_CONTROLS_NON_HT,
	.fill_in_addresses = &p4_fill_in_addresses,
	.setup_ctrs = &p4_setup_ctrs,
	.check_ctrs = &p4_check_ctrs,
	.start = &p4_start,
	.stop = &p4_stop,
	.shutdown = &p4_shutdown
};
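
/*
 * Both specs export the same entry points; oprofile's NMI driver
 * (nmi_int.c) registers op_p4_ht2_spec when the processor reports two
 * hyper-threaded siblings, and op_p4_spec otherwise.
 */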