/*
* Performance counters:
*
- * Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
- * Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
*
* Data type definitions, declarations, prototypes.
*
- * Started by: Thomas Gleixner and Ingo Molnar
+ * Started by: Thomas Gleixner and Ingo Molnar
*
* For licencing details see kernel-base/COPYING
*/
/*
* attr.type
*/
-enum perf_event_types {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
-
- /*
- * available TYPE space, raw is the max value.
- */
-
- PERF_TYPE_RAW = 128,
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+
+ PERF_TYPE_MAX, /* non-ABI */
};
/*
- * Generalized performance counter event types, used by the attr.event_id
- * parameter of the sys_perf_counter_open() syscall:
+ * Generalized performance counter event types, used by the
+ * attr.config parameter of the sys_perf_counter_open()
+ * syscall:
*/
-enum attr_ids {
+enum perf_hw_id {
/*
* Common hardware events, generalized by the kernel:
*/
- PERF_COUNT_CPU_CYCLES = 0,
- PERF_COUNT_INSTRUCTIONS = 1,
- PERF_COUNT_CACHE_REFERENCES = 2,
- PERF_COUNT_CACHE_MISSES = 3,
- PERF_COUNT_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_BRANCH_MISSES = 5,
- PERF_COUNT_BUS_CYCLES = 6,
-
- PERF_HW_EVENTS_MAX = 7,
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
};
/*
- * Special "software" counters provided by the kernel, even if the hardware
- * does not support performance counters. These counters measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
+ * Generalized hardware cache counters:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
*/
-enum sw_event_ids {
- PERF_COUNT_CPU_CLOCK = 0,
- PERF_COUNT_TASK_CLOCK = 1,
- PERF_COUNT_PAGE_FAULTS = 2,
- PERF_COUNT_CONTEXT_SWITCHES = 3,
- PERF_COUNT_CPU_MIGRATIONS = 4,
- PERF_COUNT_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_PAGE_FAULTS_MAJ = 6,
-
- PERF_SW_EVENTS_MAX = 7,
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};
-#define __PERF_COUNTER_MASK(name) \
- (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \
- PERF_COUNTER_##name##_SHIFT)
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-#define PERF_COUNTER_RAW_BITS 1
-#define PERF_COUNTER_RAW_SHIFT 63
-#define PERF_COUNTER_RAW_MASK __PERF_COUNTER_MASK(RAW)
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
-#define PERF_COUNTER_CONFIG_BITS 63
-#define PERF_COUNTER_CONFIG_SHIFT 0
-#define PERF_COUNTER_CONFIG_MASK __PERF_COUNTER_MASK(CONFIG)
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-#define PERF_COUNTER_TYPE_BITS 7
-#define PERF_COUNTER_TYPE_SHIFT 56
-#define PERF_COUNTER_TYPE_MASK __PERF_COUNTER_MASK(TYPE)
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
-#define PERF_COUNTER_EVENT_BITS 56
-#define PERF_COUNTER_EVENT_SHIFT 0
-#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT)
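/*
 * Illustrative sketch, assuming a byte-wise (id | op << 8 | result << 16)
 * packing of attr.config for PERF_TYPE_HW_CACHE events built from the
 * three cache enums above; the perf_counter implementation is
 * authoritative for the actual encoding.
 */
static inline __u64 hw_cache_event(enum perf_hw_cache_id id,
				   enum perf_hw_cache_op_id op,
				   enum perf_hw_cache_op_result_id result)
{
	return (__u64)id | ((__u64)op << 8) | ((__u64)result << 16);
}

/*
 * Example: L1-D read misses ==
 *	hw_cache_event(PERF_COUNT_HW_CACHE_L1D,
 *		       PERF_COUNT_HW_CACHE_OP_READ,
 *		       PERF_COUNT_HW_CACHE_RESULT_MISS)
 */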
+/*
+ * Special "software" counters provided by the kernel, even if the hardware
+ * does not support performance counters. These counters measure various
+ * physical and software events of the kernel (and allow profiling
+ * them as well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
/*
* Bits that can be set in attr.sample_type to request information
* in the overflow packets.
*/
enum perf_counter_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_GROUP = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_CONFIG = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_GROUP = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+
+ PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */
};
/*
* in increasing order of bit value, after the counter value.
*/
enum perf_counter_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+
+ PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
};
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+
/*
* Hardware event to monitor via a performance monitoring counter:
*/
struct perf_counter_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
/*
- * The MSB of the config word signifies if the rest contains cpu
- * specific (raw) counter configuration data, if unset, the next
- * 7 bits are an event type and the rest of the bits are the event
- * identifier.
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
*/
__u64 config;
exclude_hv : 1, /* ditto hypervisor */
exclude_idle : 1, /* don't count when idle */
mmap : 1, /* include mmap data */
- munmap : 1, /* include munmap data */
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
- __reserved_1 : 52;
+ __reserved_1 : 53;
__u32 wakeup_events; /* wakeup every n events */
__u32 __reserved_2;
__u64 __reserved_3;
- __u64 __reserved_4;
};
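/*
 * Usage sketch: opening a hardware cycle counter on the current task,
 * any CPU.  The raw syscall invocation and argument order below are
 * assumptions based on sys_perf_counter_open(); treat this as
 * illustrative rather than a reference wrapper.
 */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int open_cycle_counter(void)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	 = PERF_TYPE_HARDWARE;
	attr.size	 = sizeof(attr);	/* PERF_ATTR_SIZE_VER0 for this layout */
	attr.config	 = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/* pid == 0: current task, cpu == -1: any, group_fd == -1, flags == 0 */
	return syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
}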
/*
/*
* Control data for the mmap() data buffer.
*
- * User-space reading this value should issue an rmb(), on SMP capable
- * platforms, after reading this value -- see perf_counter_wakeup().
+ * User-space reading the @data_head value should issue an rmb(), on
+ * SMP capable platforms, after reading this value -- see
+ * perf_counter_wakeup().
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data. In this case
+ * the kernel will not over-write unread data.
*/
__u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
};
-#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
-#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_EVENT_MISC_KERNEL (1 << 0)
-#define PERF_EVENT_MISC_USER (2 << 0)
-#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
+#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_EVENT_MISC_KERNEL (1 << 0)
+#define PERF_EVENT_MISC_USER (2 << 0)
+#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
+#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
struct perf_event_header {
__u32 type;
* };
*/
PERF_EVENT_MMAP = 1,
- PERF_EVENT_MUNMAP = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * };
+ */
+ PERF_EVENT_LOST = 2,
/*
* struct {
* struct {
* struct perf_event_header header;
* u64 time;
+ * u64 id;
* u64 sample_period;
* };
*/
* struct {
* struct perf_event_header header;
* u64 time;
+ * u64 id;
* };
*/
PERF_EVENT_THROTTLE = 5,
PERF_EVENT_UNTHROTTLE = 6,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * };
+ */
+ PERF_EVENT_FORK = 7,
+
/*
* When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
- * will be PERF_RECORD_*
+ * will be PERF_SAMPLE_*
*
* struct {
* struct perf_event_header header;
*
- * { u64 ip; } && PERF_RECORD_IP
- * { u32 pid, tid; } && PERF_RECORD_TID
- * { u64 time; } && PERF_RECORD_TIME
- * { u64 addr; } && PERF_RECORD_ADDR
- * { u64 config; } && PERF_RECORD_CONFIG
- * { u32 cpu, res; } && PERF_RECORD_CPU
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
*
* { u64 nr;
- * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ * { u64 id, val; } cnt[nr]; } && PERF_SAMPLE_GROUP
*
- * { u16 nr,
- * hv,
- * kernel,
- * user;
- * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
* };
*/
};
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
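/*
 * Sketch of how a PERF_SAMPLE_CALLCHAIN consumer might use the
 * sentinels above, assuming they appear inline in the ips[] array and
 * mark which context (hv/kernel/user/guest) the following instruction
 * pointers belong to:
 */
static void walk_callchain(const __u64 *ips, __u64 nr)
{
	__u64 context = PERF_CONTEXT_MAX;	/* unknown until a marker is seen */
	__u64 i;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= (__u64)PERF_CONTEXT_MAX) {
			context = ips[i];	/* switch hv/kernel/user section */
			continue;
		}
		/* ips[i] is a real instruction pointer within 'context' */
	}
}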
+
#ifdef __KERNEL__
/*
* Kernel-internal data types and definitions:
#include <linux/pid_namespace.h>
#include <asm/atomic.h>
-struct task_struct;
-
-static inline u64 perf_event_raw(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_RAW_MASK;
-}
+#define PERF_MAX_STACK_DEPTH 255
-static inline u64 perf_event_config(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_CONFIG_MASK;
-}
-
-static inline u64 perf_event_type(struct perf_counter_attr *attr)
-{
- return (attr->config & PERF_COUNTER_TYPE_MASK) >>
- PERF_COUNTER_TYPE_SHIFT;
-}
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
-static inline u64 perf_event_id(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_EVENT_MASK;
-}
+struct task_struct;
/**
* struct hw_perf_counter - performance counter hardware details:
#ifdef CONFIG_PERF_COUNTERS
union {
struct { /* hardware */
- u64 config;
- unsigned long config_base;
- unsigned long counter_base;
- int idx;
+ u64 config;
+ unsigned long config_base;
+ unsigned long counter_base;
+ int idx;
};
union { /* software */
- atomic64_t count;
- struct hrtimer hrtimer;
+ atomic64_t count;
+ struct hrtimer hrtimer;
};
};
atomic64_t prev_count;
u64 sample_period;
+ u64 last_period;
atomic64_t period_left;
u64 interrupts;
+
+ u64 freq_count;
+ u64 freq_interrupts;
+ u64 freq_stamp;
#endif
};
struct perf_mmap_data {
struct rcu_head rcu_head;
int nr_pages; /* nr of data pages */
+ int writable; /* are we writable */
int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
atomic_long_t done_head; /* completed head */
atomic_t lock; /* concurrent writes */
-
atomic_t wakeup; /* needs a wakeup */
+ atomic_t lost; /* nr records lost */
struct perf_counter_mmap_page *user_page;
void *data_pages[0];
* Protect the states of the counters in the list,
* nr_active, and the list:
*/
- spinlock_t lock;
+ spinlock_t lock;
/*
* Protect the list of counters. Locking either mutex or lock
* is sufficient to ensure the list doesn't change; to change
* the list you need to lock both the mutex and the spinlock.
*/
- struct mutex mutex;
+ struct mutex mutex;
- struct list_head counter_list;
- struct list_head event_list;
- int nr_counters;
- int nr_active;
- int is_active;
- atomic_t refcount;
- struct task_struct *task;
+ struct list_head counter_list;
+ struct list_head event_list;
+ int nr_counters;
+ int nr_active;
+ int is_active;
+ atomic_t refcount;
+ struct task_struct *task;
/*
* Context clock, runs when context enabled.
*/
- u64 time;
- u64 timestamp;
+ u64 time;
+ u64 timestamp;
/*
* These fields let us detect when two contexts have both
* been cloned (inherited) from a common ancestor.
*/
- struct perf_counter_context *parent_ctx;
- u64 parent_gen;
- u64 generation;
- int pin_count;
- struct rcu_head rcu_head;
+ struct perf_counter_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ struct rcu_head rcu_head;
};
/**
extern int perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_free_task(struct task_struct *task);
+extern void set_perf_counter_pending(void);
extern void perf_counter_do_pending(void);
extern void perf_counter_print_debug(void);
extern void __perf_disable(void);
struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);
-extern int perf_counter_overflow(struct perf_counter *counter,
- int nmi, struct pt_regs *regs, u64 addr);
+struct perf_sample_data {
+ struct pt_regs *regs;
+ u64 addr;
+ u64 period;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data);
+
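/*
 * Sketch of how an architecture's overflow path might fill the new
 * perf_sample_data before calling perf_counter_overflow().  The handler
 * name and the use of hw.last_period are assumptions for illustration:
 */
static void example_pmu_overflow(struct perf_counter *counter, int nmi,
				 struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data = {
		.regs	= regs,
		.addr	= addr,
		.period	= counter->hw.last_period,
	};

	if (perf_counter_overflow(counter, nmi, &data)) {
		/* too many overflows: arch code is expected to stop the counter */
	}
}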
/*
* Return 1 for a software counter, 0 for a hardware counter
*/
static inline int is_software_counter(struct perf_counter *counter)
{
- return !perf_event_raw(&counter->attr) &&
- perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
+ return (counter->attr.type != PERF_TYPE_RAW) &&
+ (counter->attr.type != PERF_TYPE_HARDWARE) &&
+ (counter->attr.type != PERF_TYPE_HW_CACHE);
}
extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
-extern void perf_counter_mmap(unsigned long addr, unsigned long len,
- unsigned long pgoff, struct file *file);
+extern void __perf_counter_mmap(struct vm_area_struct *vma);
-extern void perf_counter_munmap(unsigned long addr, unsigned long len,
- unsigned long pgoff, struct file *file);
+static inline void perf_counter_mmap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __perf_counter_mmap(vma);
+}
extern void perf_counter_comm(struct task_struct *tsk);
-
-extern void perf_counter_task_migration(struct task_struct *task, int cpu);
-
-#define MAX_STACK_DEPTH 255
-
-struct perf_callchain_entry {
- u16 nr, hv, kernel, user;
- u64 ip[MAX_STACK_DEPTH];
-};
+extern void perf_counter_fork(struct task_struct *tsk);
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-extern int sysctl_perf_counter_priv;
+extern int sysctl_perf_counter_paranoid;
extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
extern void perf_counter_init(void);
perf_swcounter_event(u32 event, u64 nr, int nmi,
struct pt_regs *regs, u64 addr) { }
-static inline void
-perf_counter_mmap(unsigned long addr, unsigned long len,
- unsigned long pgoff, struct file *file) { }
-
-static inline void
-perf_counter_munmap(unsigned long addr, unsigned long len,
- unsigned long pgoff, struct file *file) { }
-
+static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_fork(struct task_struct *tsk) { }
static inline void perf_counter_init(void) { }
-static inline void perf_counter_task_migration(struct task_struct *task,
- int cpu) { }
#endif
#endif /* __KERNEL__ */