4 #include "parse-options.h"
5 #include "parse-events.h"
/* Some libcs only expose strcasestr() behind _GNU_SOURCE; declare it
 * explicitly so the alias matching below can rely on it. */
extern char *strcasestr(const char *haystack, const char *needle);
/* One attribute slot per counter requested on the command line; filled
 * in by parse_events(), indexed by nr_counters (defined elsewhere). */
struct perf_counter_attr attrs[MAX_COUNTERS];
/* Root of the debugfs tracing/events tree used by the tracepoint code. */
char debugfs_path[MAXPATHLEN];
/* Designated-initializer shorthands for event_symbols[] below:
 * CHW(x) fills in a generic hardware event, CSW(x) a software event. */
#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
/*
 * Table of symbolic event names: each entry carries the counter type
 * and config plus the canonical spelling and one optional short alias
 * ("" means no alias).  Scanned by check_events()/parse_symbolic_event()
 * and printed by print_events().
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this chunk — the definition continues past what is shown.
 */
static struct event_symbol event_symbols[] = {
	{ CHW(CPU_CYCLES),		"cpu-cycles",		"cycles"	},
	{ CHW(INSTRUCTIONS),		"instructions",		""		},
	{ CHW(CACHE_REFERENCES),	"cache-references",	""		},
	{ CHW(CACHE_MISSES),		"cache-misses",		""		},
	{ CHW(BRANCH_INSTRUCTIONS),	"branch-instructions",	"branches"	},
	{ CHW(BRANCH_MISSES),		"branch-misses",	""		},
	{ CHW(BUS_CYCLES),		"bus-cycles",		""		},
	{ CSW(CPU_CLOCK),		"cpu-clock",		""		},
	{ CSW(TASK_CLOCK),		"task-clock",		""		},
	{ CSW(PAGE_FAULTS),		"page-faults",		"faults"	},
	{ CSW(PAGE_FAULTS_MIN),		"minor-faults",		""		},
	{ CSW(PAGE_FAULTS_MAJ),		"major-faults",		""		},
	{ CSW(CONTEXT_SWITCHES),	"context-switches",	"cs"		},
	{ CSW(CPU_MIGRATIONS),		"cpu-migrations",	"migrations"	},
/* Extract one bit-field from a raw config word using the matching
 * PERF_COUNTER_<name>_MASK / PERF_COUNTER_<name>_SHIFT constants. */
#define __PERF_COUNTER_FIELD(config, name) \
	((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT)
/* Named accessors for the individual config fields. */
#define PERF_COUNTER_RAW(config)	__PERF_COUNTER_FIELD(config, RAW)
#define PERF_COUNTER_CONFIG(config)	__PERF_COUNTER_FIELD(config, CONFIG)
#define PERF_COUNTER_TYPE(config)	__PERF_COUNTER_FIELD(config, TYPE)
#define PERF_COUNTER_ID(config)		__PERF_COUNTER_FIELD(config, EVENT)
/* Printable names for generic hardware counters, indexed by
 * PERF_COUNT_HW_* (used by event_name()).
 * NOTE(review): initializer body elided in this view. */
static char *hw_event_names[] = {
/* Printable names for software counters, indexed by PERF_COUNT_SW_*.
 * NOTE(review): initializer body elided in this view. */
static char *sw_event_names[] = {
/*
 * Accepted command-line spellings for each cache level, one row per
 * PERF_COUNT_HW_CACHE_* index; slot [0] is the canonical name used
 * when printing (see event_cache_name()).
 * NOTE(review): at least one row (LLC) and the closing "};" are not
 * visible in this chunk.
 */
static char *hw_cache[][MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",		"bpc",	},
/*
 * Accepted spellings for each cache operation, one row per
 * PERF_COUNT_HW_CACHE_OP_* index; slot [0] is the canonical name.
 * NOTE(review): the closing "};" is not visible in this chunk.
 */
static char *hw_cache_op[][MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
/*
 * Accepted spellings for each cache result, one row per
 * PERF_COUNT_HW_CACHE_RESULT_* index; slot [0] is the canonical name.
 * NOTE(review): the closing "};" is not visible in this chunk.
 */
static char *hw_cache_result[][MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
/* Bit-mask helpers over the cache enums: C(x) abbreviates the enum
 * constant, CACHE_<op> is the valid-op bit for that operation, and
 * COP(x) is the bit for an arbitrary op index x. */
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)
 * cache operation stat
104 * L1I : Read and prefetch only
105 * ITLB and BPU : Read-only
/*
 * Per-cache-level bitmask of valid operations (CACHE_READ/WRITE/
 * PREFETCH), consulted by is_cache_op_valid().
 * NOTE(review): the closing "};" is not visible in this chunk.
 */
static unsigned long hw_cache_stat[C(MAX)] = {
 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)] = (CACHE_READ),
/*
 * Iterate over every subsystem directory under debugfs_path: 'file'
 * receives the full path and 'st' the stat result; entries that are not
 * directories, plus "." and "..", are skipped.  The loop body attaches
 * to the trailing if().  NOTE(review): readdir_r() is used; it has since
 * been deprecated in glibc in favour of readdir().
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next, file, st)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (snprintf(file, MAXPATHLEN, "%s/%s", debugfs_path,		       \
			sys_dirent.d_name) &&				       \
	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		       \
	   (strcmp(sys_dirent.d_name, ".")) &&				       \
	   (strcmp(sys_dirent.d_name, "..")))
/*
 * Iterate over every event directory inside one subsystem directory
 * (sys_dirent), mirroring for_each_subsystem(): 'file' gets the full
 * <debugfs>/<sys>/<event> path, non-directories and "."/".." are
 * skipped, and the loop body attaches to the trailing if().
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next, file, st)    \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	       \
	if (snprintf(file, MAXPATHLEN, "%s/%s/%s", debugfs_path,	       \
			sys_dirent.d_name, evt_dirent.d_name) &&	       \
	   (!stat(file, &st)) && (S_ISDIR(st.st_mode)) &&		       \
	   (strcmp(evt_dirent.d_name, ".")) &&				       \
	   (strcmp(evt_dirent.d_name, "..")))
/* Maximum accepted length of a subsystem or event name (tracepoints). */
#define MAX_EVENT_LENGTH 30
/*
 * Verify that 'debugfs' names a mounted debugfs filesystem by checking
 * the statfs magic number.  NOTE(review): the function body (braces,
 * return statements, st_fs declaration) is elided in this view —
 * presumably returns 0 on success and non-zero on either failure path;
 * confirm against the full source.
 */
int valid_debugfs_mount(const char *debugfs)
	if (statfs(debugfs, &st_fs) < 0)
	else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
/*
 * Map a tracepoint config value (the numeric id exposed in
 * <debugfs>/<sys>/<event>/id) back to its "sys:event" name.  Walks every
 * event directory under debugfs_path, reads each id file, and on a match
 * formats the name into a static buffer — so the result is overwritten
 * by the next call (not reentrant).
 * NOTE(review): several interior lines (error handling, the id
 * comparison against 'config', directory cleanup) are elided in this
 * view; the hedges below reflect that.
 */
static char *tracepoint_id_to_name(u64 config)
	static char tracepoint_name[2 * MAX_EVENT_LENGTH];
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	/* Bail out early if debugfs is not mounted where expected. */
	if (valid_debugfs_mount(debugfs_path))
	sys_dir = opendir(debugfs_path);
	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
		evt_dir = opendir(evt_path);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
			/* Re-use evt_path for the per-event "id" file. */
			snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id",
				 debugfs_path, sys_dirent.d_name,
			fd = open(evt_path, O_RDONLY);
			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
			/* Matching id found: build "sys:event" and return it. */
			snprintf(tracepoint_name, 2 * MAX_EVENT_LENGTH,
				 "%s:%s", sys_dirent.d_name,
			return tracepoint_name;
/*
 * Return 1 if 'cache_op' is a valid operation for 'cache_type'
 * according to the hw_cache_stat[] bitmask table, 0 otherwise.
 * NOTE(review): cache_type is used as an index without a bounds check
 * here — callers must ensure it is < PERF_COUNT_HW_CACHE_MAX.
 */
static int is_cache_op_valid(u8 cache_type, u8 cache_op)
	if (hw_cache_stat[cache_type] & COP(cache_op))
		return 1;	/* valid */
		return 0;	/* invalid */
/*
 * Build a printable name for a generalized cache event in a static
 * buffer (not reentrant): either "type-op-result" or a shorter
 * "type-op" form.  NOTE(review): the condition selecting between the
 * two sprintf branches is elided in this view — confirm which results
 * take the short form against the full source.
 */
static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
	static char name[50];
		sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][0],
			hw_cache_result[cache_result][0]);
		sprintf(name, "%s-%s", hw_cache[cache_type][0],
			hw_cache_op[cache_op][1]);
222 char *event_name(int counter)
224 u64 config = attrs[counter].config;
225 int type = attrs[counter].type;
228 if (attrs[counter].type == PERF_TYPE_RAW) {
229 sprintf(buf, "raw 0x%llx", config);
234 case PERF_TYPE_HARDWARE:
235 if (config < PERF_COUNT_HW_MAX)
236 return hw_event_names[config];
237 return "unknown-hardware";
239 case PERF_TYPE_HW_CACHE: {
240 u8 cache_type, cache_op, cache_result;
242 cache_type = (config >> 0) & 0xff;
243 if (cache_type > PERF_COUNT_HW_CACHE_MAX)
244 return "unknown-ext-hardware-cache-type";
246 cache_op = (config >> 8) & 0xff;
247 if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
248 return "unknown-ext-hardware-cache-op";
250 cache_result = (config >> 16) & 0xff;
251 if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
252 return "unknown-ext-hardware-cache-result";
254 if (!is_cache_op_valid(cache_type, cache_op))
255 return "invalid-cache";
257 return event_cache_name(cache_type, cache_op, cache_result);
260 case PERF_TYPE_SOFTWARE:
261 if (config < PERF_COUNT_SW_MAX)
262 return sw_event_names[config];
263 return "unknown-software";
265 case PERF_TYPE_TRACEPOINT:
266 return tracepoint_id_to_name(config);
/*
 * Scan a table of alias spellings ('size' rows, up to MAX_ALIASES
 * NULL-terminated entries each) for the longest case-insensitive prefix
 * match against *str.  NOTE(review): the elided tail presumably records
 * the winning row index, advances *str past the match, and returns -1
 * when nothing matched — callers test for == -1; confirm against the
 * full source.
 */
static int parse_aliases(const char **str, char *names[][MAX_ALIASES], int size)
	for (i = 0; i < size; i++) {
		for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
			n = strlen(names[i][j]);
			/* Keep only the longest of all matching aliases. */
			if (n > longest && !strncasecmp(*str, names[i][j], n))
/*
 * Parse a generalized cache event of the form
 * <type>[-<op>][-<result>] (e.g. "L1-dcache-load-misses") using the
 * hw_cache*/ /* alias tables.  Missing parts fall back to OP_READ and
 * RESULT_ACCESS.  On success fills attr->type/config; the config
 * layout (type | op<<8 | result<<16) matches the decode in
 * event_name().  NOTE(review): interior lines (the '-' skip, failure
 * returns, *str advance) are elided in this view.
 */
parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
	const char *s = *str;
	int cache_type = -1, cache_op = -1, cache_result = -1;
	cache_type = parse_aliases(&s, hw_cache, PERF_COUNT_HW_CACHE_MAX);
	/*
	 * No fallback - if we cannot get a clear cache type
	 */
	if (cache_type == -1)
	/* Consume up to two more '-'-separated tokens: op and/or result. */
	while ((cache_op == -1 || cache_result == -1) && *s == '-') {
		if (cache_op == -1) {
			cache_op = parse_aliases(&s, hw_cache_op,
						PERF_COUNT_HW_CACHE_OP_MAX);
				/* Reject ops the table says this cache lacks. */
				if (!is_cache_op_valid(cache_type, cache_op))
		if (cache_result == -1) {
			cache_result = parse_aliases(&s, hw_cache_result,
						PERF_COUNT_HW_CACHE_RESULT_MAX);
			if (cache_result >= 0)
		/*
		 * Can't parse this as a cache op or result, so back up
		 */
	/*
	 * Fall back to reads:
	 */
		cache_op = PERF_COUNT_HW_CACHE_OP_READ;
	/*
	 * Fall back to accesses:
	 */
	if (cache_result == -1)
		cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
	attr->config = cache_type | (cache_op << 8) | (cache_result << 16);
	attr->type = PERF_TYPE_HW_CACHE;
/*
 * Parse a "sys:event" tracepoint spec: split on ':', bound-check both
 * halves against MAX_EVENT_LENGTH, read the numeric id from
 * <debugfs>/<sys>/<event>/id into attr->config, set
 * PERF_TYPE_TRACEPOINT, and advance *strp past the consumed text.
 * NOTE(review): the failure returns, the atoll/strtoull of id_buf into
 * attr->config, and fd cleanup are elided in this view.
 */
static int parse_tracepoint_event(const char **strp,
				  struct perf_counter_attr *attr)
	const char *evt_name;
	char sys_name[MAX_EVENT_LENGTH];
	unsigned int sys_length, evt_length;
	char evt_path[MAXPATHLEN];
	if (valid_debugfs_mount(debugfs_path))
	/* Everything before the ':' is the subsystem name. */
	evt_name = strchr(*strp, ':');
	sys_length = evt_name - *strp;
	if (sys_length >= MAX_EVENT_LENGTH)
	strncpy(sys_name, *strp, sys_length);
	sys_name[sys_length] = '\0';
	evt_name = evt_name + 1;
	evt_length = strlen(evt_name);
	if (evt_length >= MAX_EVENT_LENGTH)
	snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
	fd = open(evt_path, O_RDONLY);
	if (read(fd, id_buf, sizeof(id_buf)) < 0) {
	attr->type = PERF_TYPE_TRACEPOINT;
	*strp = evt_name + evt_length;
/*
 * Test whether 'str' begins with event_symbols[i]'s canonical symbol or
 * its alias.  NOTE(review): the returns are elided — presumably the
 * matched length n on success (the caller advances by it) and 0 on no
 * match; confirm against the full source.
 */
static int check_events(const char *str, unsigned int i)
	n = strlen(event_symbols[i].symbol);
	if (!strncmp(str, event_symbols[i].symbol, n))
	n = strlen(event_symbols[i].alias);
	if (!strncmp(str, event_symbols[i].alias, n))
/*
 * Try to match *strp against every entry of event_symbols[]; on the
 * first match copy that entry's type/config into attr (and presumably
 * advance *strp by the matched length n — the elided lines would show
 * this).  Returns non-zero on success per the caller in
 * parse_event_symbols().
 */
parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
	const char *str = *strp;
	for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
		n = check_events(str, i);
			attr->type = event_symbols[i].type;
			attr->config = event_symbols[i].config;
/*
 * Parse a raw event spec of the form "r<hex>" (str + 1 skips the
 * leading 'r', which the elided lines presumably verify): hex2u64()
 * converts the digits into 'config', which becomes attr->config with
 * type PERF_TYPE_RAW.
 */
static int parse_raw_event(const char **strp, struct perf_counter_attr *attr)
	const char *str = *strp;
	n = hex2u64(str + 1, &config);
	attr->type = PERF_TYPE_RAW;
	attr->config = config;
/*
 * Parse an explicit "<type>:<config>" numeric spec where type must be
 * below PERF_TYPE_MAX.  NOTE(review): the line advancing 'str' past the
 * ':' before the second strtoul, and the success/failure returns, are
 * elided in this view.
 */
parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
	const char *str = *strp;
	type = strtoul(str, &endp, 0);
	/* Accept only if digits were consumed and a ':' follows. */
	if (endp > str && type < PERF_TYPE_MAX && *endp == ':') {
		config = strtoul(str, &endp, 0);
		attr->config = config;
/*
 * Parse an optional privilege-modifier suffix on an event spec
 * (":u", ":k", ":h" in any combination).  The exclude flags default to
 * 1 and each matched letter presumably clears the corresponding one in
 * the elided branches ('u' -> eu, 'k' -> ek, 'h' -> eh) — confirm
 * against the full source.  The flags are only applied when at least
 * ':' plus one letter were consumed (str >= *strp + 2).
 */
parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
	const char *str = *strp;
	int eu = 1, ek = 1, eh = 1;
		else if (*str == 'k')
		else if (*str == 'h')
	if (str >= *strp + 2) {
		attr->exclude_user = eu;
		attr->exclude_kernel = ek;
		attr->exclude_hv = eh;
/*
 * Each event can have multiple symbolic names.
 * Symbolic names are (almost) exactly matched.
 *
 * Tries each parser in priority order — tracepoint, raw, numeric,
 * symbolic, generic cache; || short-circuits, so the first one that
 * succeeds wins.  Fails (elided return) only if all five reject the
 * string; otherwise an optional :ukh modifier is parsed afterwards.
 */
static int parse_event_symbols(const char **str, struct perf_counter_attr *attr)
	if (!(parse_tracepoint_event(str, attr) ||
	      parse_raw_event(str, attr) ||
	      parse_numeric_event(str, attr) ||
	      parse_symbolic_event(str, attr) ||
	      parse_generic_hw_event(str, attr)))
	parse_event_modifier(str, attr);
/*
 * parse-options callback for -e: parse one or more events out of 'str'
 * (comma/whitespace separated, per the delimiter check below) and
 * append each parsed attribute to attrs[], bounded by MAX_COUNTERS.
 * NOTE(review): the error returns, nr_counters increment, and the outer
 * loop structure are elided in this view.
 */
int parse_events(const struct option *opt __used, const char *str, int unset __used)
	struct perf_counter_attr attr;
	/* Refuse to overflow the fixed-size attrs[] table. */
	if (nr_counters == MAX_COUNTERS)
	memset(&attr, 0, sizeof(attr));
	if (!parse_event_symbols(&str, &attr))
	/* The parsed event must end at a delimiter or end of string. */
	if (!(*str == 0 || *str == ',' || isspace(*str)))
	attrs[nr_counters] = attr;
	while (isspace(*str))
/*
 * Category labels printed next to each event by print_events(); indexed
 * by PERF_TYPE_* + 1 (see the "+1" offsets at the use sites below).
 * NOTE(review): the earlier entries of this initializer and its closing
 * "};" are elided in this view.
 */
static const char * const event_type_descriptors[] = {
	"Hardware cache event",
/*
 * Print the events from <debugfs_mount_point>/tracing/events
 *
 * Walks the same subsystem/event directory tree as
 * tracepoint_id_to_name() and prints each event as "sys:event" followed
 * by its category label (event_type_descriptors is indexed by type + 1).
 * NOTE(review): the early returns and closedir() cleanup are elided in
 * this view.
 */
static void print_tracepoint_events(void)
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	if (valid_debugfs_mount(debugfs_path))
	sys_dir = opendir(debugfs_path);
	for_each_subsystem(sys_dir, sys_dirent, sys_next, evt_path, st) {
		evt_dir = opendir(evt_path);
		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next,
			/* Re-use evt_path as the "sys:event" display string. */
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			fprintf(stderr, "  %-40s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT+1]);
/*
 * Print the help text for the event symbols:
 *
 * Lists, in order: the symbolic hardware/software events from
 * event_symbols[] (grouped by type, with "symbol OR alias" when an
 * alias exists), every valid generalized cache event combination, the
 * raw-event syntax, and finally all tracepoints.
 */
void print_events(void)
	struct event_symbol *syms = event_symbols;
	unsigned int i, type, op, prev_type = -1;
	fprintf(stderr, "\n");
	fprintf(stderr, "List of pre-defined events (to be used in -e):\n");
	for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
		/* Descriptor table is offset by one from PERF_TYPE_*. */
		type = syms->type + 1;
		if (type >= ARRAY_SIZE(event_type_descriptors))
		/* Blank line between event-type groups. */
		if (type != prev_type)
			fprintf(stderr, "\n");
		if (strlen(syms->alias))
			sprintf(name, "%s OR %s", syms->symbol, syms->alias);
			strcpy(name, syms->symbol);
		fprintf(stderr, "  %-40s [%s]\n", name,
			event_type_descriptors[type]);
	fprintf(stderr, "\n");
	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!is_cache_op_valid(type, op))
			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				fprintf(stderr, "  %-40s [%s]\n",
					event_cache_name(type, op, i),
					/* [4] is the cache-event descriptor. */
					event_type_descriptors[4]);
	fprintf(stderr, "\n");
	fprintf(stderr, "  %-40s [raw hardware event descriptor]\n",
	fprintf(stderr, "\n");
	print_tracepoint_events();