/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static int tracing_disabled = 1;
static cycle_t
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
static atomic_t			tracer_counter;
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled;
static unsigned long		trace_nr_entries = 16384UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
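/*
 * Rough sizing arithmetic (illustrative only: assumes 4096-byte pages
 * and, hypothetically, a 32-byte struct trace_entry): 4096 / 32 = 128
 * entries per page, so the default of 16384 entries above would cost
 * 16384 / 128 = 128 pages per CPU buffer.
 */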
static int __init set_nr_entries(char *str)
{
        trace_nr_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("trace_entries=", set_nr_entries);
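/*
 * Example: booting with "trace_entries=65536" on the kernel command
 * line (a hypothetical value) quadruples the default set above; the
 * value is parsed by simple_strtoul() with base 0, so hex works too.
 */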
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}
enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,

        __TRACE_LAST_TYPE
};
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF	= 0x01,
        TRACE_FLAG_NEED_RESCHED	= 0x02,
        TRACE_FLAG_HARDIRQ	= 0x04,
        TRACE_FLAG_SOFTIRQ	= 0x08,
};
enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT	= 0x01,
        TRACE_ITER_SYM_OFFSET	= 0x02,
        TRACE_ITER_SYM_ADDR	= 0x04,
        TRACE_ITER_VERBOSE	= 0x08,
};

#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
/* These must match the bit positions above */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        NULL
};

static unsigned trace_flags;
static DEFINE_SPINLOCK(ftrace_max_lock);
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static notrace void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}
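/*
 * update_max_tr below snapshots the live trace into max_tr without
 * copying entries: for each CPU the whole trace_array_cpu is memcpy'd
 * into max_tr and the old max buffers (the trace pointer and its page
 * list) are handed back to the live array, effectively swapping the
 * two buffers under ftrace_max_lock.
 */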
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data;
        void *save_trace;
        struct list_head save_pages;
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
        for_each_possible_cpu(i) {
                data = tr->data[i];
                save_trace = max_tr.data[i]->trace;
                save_pages = max_tr.data[i]->trace_pages;
                memcpy(max_tr.data[i], data, sizeof(*data));
                data->trace = save_trace;
                data->trace_pages = save_pages;
        }

        __update_max_tr(tr, tsk, cpu);
        spin_unlock(&ftrace_max_lock);
}
/*
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        void *save_trace;
        struct list_head save_pages;
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        spin_lock(&ftrace_max_lock);
        for_each_possible_cpu(i)
                tracing_reset(max_tr.data[i]);

        save_trace = max_tr.data[cpu]->trace;
        save_pages = max_tr.data[cpu]->trace_pages;
        memcpy(max_tr.data[cpu], data, sizeof(*data));
        data->trace = save_trace;
        data->trace_pages = save_pages;

        __update_max_tr(tr, tsk, cpu);
        spin_unlock(&ftrace_max_lock);
}
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array_cpu *data;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                int i;

                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_possible_cpu(i) {
                        data = tr->data[i];
                        if (data->trace)
                                tracing_reset(data);
                }
                current_trace = type;
                tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}
void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Tracer %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        /* the removed tracer defined the max name length; recompute it */
        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}
void notrace tracing_reset(struct trace_array_cpu *data)
{
        data->trace_idx = 0;
        data->trace_current = data->trace;
        data->trace_current_idx = 0;
}
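/*
 * The per-CPU "disabled" counter used below acts as a recursion guard:
 * an entry is recorded only when atomic_inc_return() sees the count go
 * to 1, i.e. when this is the only active trace call on the CPU, so
 * re-entry from interrupts, or from traced functions called inside the
 * tracer itself, cannot corrupt the buffer mid-update.
 */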
static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!tracer_enabled))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

notrace void tracing_start_function_trace(void)
{
        register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
        unregister_ftrace_function(&trace_ops);
}
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;
static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);
static void notrace trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_cmdline_to_pid[idx] = tsk->pid;
                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}
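/*
 * The two arrays form a bidirectional map. With hypothetical values:
 * if pid 4242 occupies slot 7, then map_pid_to_cmdline[4242] == 7,
 * map_cmdline_to_pid[7] == 4242, and saved_cmdlines[7] holds the comm.
 * Recycling slot 7 first resets the evicted pid's forward mapping to
 * (unsigned)-1, so stale lookups fall back to "<...>".
 */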
static notrace char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];
 out:
        return cmdline;
}
notrace void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
                        struct trace_array_cpu *data)
{
        unsigned long idx, idx_next;
        struct trace_entry *entry;
        struct page *page;
        struct list_head *next;

        data->trace_idx++;
        idx = data->trace_current_idx;
        idx_next = idx + 1;

        entry = data->trace_current + idx * TRACE_ENTRY_SIZE;

        if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
                page = virt_to_page(data->trace_current);
                if (unlikely(&page->lru == data->trace_pages.prev))
                        next = data->trace_pages.next;
                else
                        next = page->lru.next;
                page = list_entry(next, struct page, lru);
                data->trace_current = page_address(page);
                idx_next = 0;
        }

        data->trace_current_idx = idx_next;

        return entry;
}
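/*
 * Note the ring semantics above: when a page fills up the writer moves
 * to the next page, wrapping from the last page back to the first and
 * silently overwriting the oldest entries. trace_idx keeps counting
 * past tr->entries, which is how readers later detect that the buffer
 * was overrun.
 */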
static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
                             unsigned long flags)
{
        struct task_struct *tsk = current;
        unsigned long pc;

        pc = preempt_count();

        entry->idx = atomic_inc_return(&tracer_counter);
        entry->preempt_count = pc & 0xff;
        entry->pid = tsk->pid;
        entry->t = now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
        struct trace_entry *entry;

        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_FN;
        entry->fn.ip = ip;
        entry->fn.parent_ip = parent_ip;
}
notrace void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev, struct task_struct *next,
                           unsigned long flags)
{
        struct trace_entry *entry;

        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_CTX;
        entry->ctx.prev_pid = prev->pid;
        entry->ctx.prev_prio = prev->prio;
        entry->ctx.prev_state = prev->state;
        entry->ctx.next_pid = next->pid;
        entry->ctx.next_prio = next->prio;
}
enum trace_file_type {
        TRACE_FILE_LAT_FMT = 1,
};
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
                struct trace_iterator *iter, int cpu)
{
        struct page *page;
        struct trace_entry *array;

        if (iter->next_idx[cpu] >= tr->entries ||
            iter->next_idx[cpu] >= data->trace_idx)
                return NULL;

        if (!iter->next_page[cpu]) {
                /*
                 * Initialize. If the count of elements in
                 * this buffer is greater than the max entries
                 * we had an overrun and looped around.
                 * We can simply use the current write position
                 * as our starting point.
                 */
                if (data->trace_idx >= tr->entries) {
                        page = virt_to_page(data->trace_current);
                        iter->next_page[cpu] = &page->lru;
                        iter->next_page_idx[cpu] = data->trace_current_idx;
                } else {
                        iter->next_page[cpu] = data->trace_pages.next;
                        iter->next_page_idx[cpu] = 0;
                }
        }

        page = list_entry(iter->next_page[cpu], struct page, lru);
        array = page_address(page);

        return &array[iter->next_page_idx[cpu]];
}
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
        int next_cpu = -1;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (!tr->data[cpu]->trace)
                        continue;
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
                /* pick the oldest entry: lowest global sequence number */
                if (ent &&
                    (!next || (long)(next->idx - ent->idx) > 0)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        return next;
}
static void *find_next_entry_inc(struct trace_iterator *iter)
{
        struct trace_entry *next;
        int next_cpu = -1;

        next = find_next_entry(iter, &next_cpu);

        if (next) {
                iter->idx++;
                iter->next_idx[next_cpu]++;
                iter->next_page_idx[next_cpu]++;

                if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
                        struct trace_array_cpu *data = iter->tr->data[next_cpu];

                        iter->next_page_idx[next_cpu] = 0;
                        iter->next_page[next_cpu] =
                                iter->next_page[next_cpu]->next;
                        if (iter->next_page[next_cpu] == &data->trace_pages)
                                iter->next_page[next_cpu] =
                                        data->trace_pages.next;
                }
        }

        iter->ent = next;
        iter->cpu = next_cpu;

        return next ? iter : NULL;
}
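/*
 * Reading is an N-way merge across the per-CPU buffers: each call to
 * find_next_entry() looks at every CPU's next unread entry and picks
 * the one with the lowest global sequence number (entry->idx), so the
 * seq_file iteration below emits a single, ordered stream.
 */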
static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *ent;
        void *last_ent = iter->ent;
        int i = (int)*pos;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        if (last_ent && !ent)
                seq_puts(m, "\n\nvim:ft=help\n");

        return ent;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int i;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace)
                return NULL;

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                for_each_possible_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;
                }

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos;
                p = s_next(m, p, &l);
        }

        return p;
}
static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}
static notrace void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        seq_printf(m, fmt, str);
#endif
}
static notrace void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        seq_printf(m, fmt, str);
#endif
}
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
static notrace void
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                seq_print_sym_offset(m, "%s", ip);
        else
                seq_print_sym_short(m, "%s", ip);

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                seq_printf(m, " <" IP_FMT ">", ip);
}
static void notrace print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                _------=> CPU#            \n");
        seq_puts(m, "#               / _-----=> irqs-off        \n");
        seq_puts(m, "#              | / _----=> need-resched    \n");
        seq_puts(m, "#              || / _---=> hardirq/softirq \n");
        seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#              |||| /                      \n");
        seq_puts(m, "#              |||||     delay             \n");
        seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}
static void notrace print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
}
static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total = 0;
        unsigned long entries = 0;
        int cpu;
        const char *name = "preemption";

        if (type)
                name = type->name;

        for_each_possible_cpu(cpu) {
                if (tr->data[cpu]->trace) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
                                entries += tr->entries;
                        else
                                entries += tr->data[cpu]->trace_idx;
                }
        }

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(m, data->critical_start, sym_flags);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(m, data->critical_end, sym_flags);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}
static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(entry->pid);

        seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
        seq_printf(m, "%d", cpu);
        seq_printf(m, "%c%c",
                   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq)
                seq_putc(m, 'H');
        else {
                if (hardirq)
                        seq_putc(m, 'h');
                else {
                        if (softirq)
                                seq_putc(m, 's');
                        else
                                seq_putc(m, '.');
                }
        }

        if (entry->preempt_count)
                seq_printf(m, "%x", entry->preempt_count);
        else
                seq_puts(m, ".");
}
unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
{
        seq_printf(m, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                seq_puts(m, "!: ");
        else if (rel_usecs > 1)
                seq_puts(m, "+: ");
        else
                seq_puts(m, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
              unsigned int trace_idx, int cpu)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
        int S;

        if (!next_entry)
                next_entry = entry;
        rel_usecs = ns2usecs(next_entry->t - entry->t);
        abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(entry->pid);
                seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
                           " %ld.%03ldms (+%ld.%03ldms): ",
                           comm,
                           entry->pid, cpu, entry->flags,
                           entry->preempt_count, trace_idx,
                           ns2usecs(entry->t),
                           abs_usecs/1000,
                           abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
        } else {
                lat_print_generic(m, entry, cpu);
                lat_print_timestamp(m, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(m, entry->fn.ip, sym_flags);
                seq_puts(m, " (");
                seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
                seq_puts(m, ")\n");
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                comm = trace_find_cmdline(entry->ctx.next_pid);
                seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
                           entry->ctx.prev_pid,
                           entry->ctx.prev_prio,
                           S,
                           entry->ctx.next_pid,
                           entry->ctx.next_prio,
                           comm);
                break;
        default:
                seq_printf(m, "Unknown type %d\n", entry->type);
        }
}
static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry = iter->ent;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int S;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(entry->t);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        seq_printf(m, "%16s-%-5d ", comm, entry->pid);
        seq_printf(m, "[%02d] ", iter->cpu);
        seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(m, entry->fn.ip, sym_flags);
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                    entry->fn.parent_ip) {
                        seq_printf(m, " <-");
                        seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
                }
                seq_printf(m, "\n");
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                seq_printf(m, " %d:%d:%c ==> %d:%d\n",
                           entry->ctx.prev_pid,
                           entry->ctx.prev_prio,
                           S,
                           entry->ctx.next_pid,
                           entry->ctx.next_prio);
                break;
        }
}
static int trace_empty(struct trace_iterator *iter)
{
        struct trace_array_cpu *data;
        int cpu;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (data->trace && data->trace_idx)
                        return 0;
        }
        return 1;
}
static int s_show(struct seq_file *m, void *v)
{
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
                if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                } else {
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                }
        } else {
                if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                        print_lat_fmt(m, iter, iter->idx, iter->cpu);
                else
                        print_trace_fmt(m, iter);
        }

        return 0;
}
static struct seq_operations tracer_seq_ops = {
        .start		= s_start,
        .next		= s_next,
        .stop		= s_stop,
        .show		= s_show,
};
static notrace struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
        struct trace_iterator *iter;

        if (tracing_disabled) {
                *ret = -ENODEV;
                return NULL;
        }

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                *ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&trace_types_lock);
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
                iter->tr = inode->i_private;
        iter->trace = current_trace;
        iter->pos = -1;

        /* TODO stop tracer */
        *ret = seq_open(file, &tracer_seq_ops);
        if (!*ret) {
                struct seq_file *m = file->private_data;
                m->private = iter;

                /* stop the trace while dumping */
                if (iter->tr->ctrl)
                        tracer_enabled = 0;

                if (iter->trace && iter->trace->open)
                        iter->trace->open(iter);
        } else {
                kfree(iter);
                iter = NULL;
        }
        mutex_unlock(&trace_types_lock);

 out:
        return iter;
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
        if (tracing_disabled)
                return -ENODEV;

        filp->private_data = inode->i_private;
        return 0;
}
int tracing_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct trace_iterator *iter = m->private;

        mutex_lock(&trace_types_lock);
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);

        /* reenable tracing if it was previously enabled */
        if (iter->tr->ctrl)
                tracer_enabled = 1;
        mutex_unlock(&trace_types_lock);

        seq_release(inode, file);
        kfree(iter);
        return 0;
}
static int tracing_open(struct inode *inode, struct file *file)
{
        int ret;

        __tracing_open(inode, file, &ret);

        return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
        struct trace_iterator *iter;
        int ret;

        iter = __tracing_open(inode, file, &ret);

        if (!ret)
                iter->iter_flags |= TRACE_FILE_LAT_FMT;

        return ret;
}
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct tracer *t = m->private;

        (*pos)++;

        if (t)
                t = t->next;

        m->private = t;

        return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct tracer *t = m->private;
        loff_t l = 0;

        mutex_lock(&trace_types_lock);
        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&trace_types_lock);
}
static int t_show(struct seq_file *m, void *v)
{
        struct tracer *t = v;

        if (!t)
                return 0;

        seq_printf(m, "%s", t->name);
        if (t->next)
                seq_putc(m, ' ');
        else
                seq_putc(m, '\n');

        return 0;
}

static struct seq_operations show_traces_seq_ops = {
        .start		= t_start,
        .next		= t_next,
        .stop		= t_stop,
        .show		= t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
        int ret;

        if (tracing_disabled)
                return -ENODEV;

        ret = seq_open(file, &show_traces_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = trace_types;
        }

        return ret;
}
static struct file_operations tracing_fops = {
        .open		= tracing_open,
        .read		= seq_read,
        .llseek		= seq_lseek,
        .release	= tracing_release,
};

static struct file_operations tracing_lt_fops = {
        .open		= tracing_lt_open,
        .read		= seq_read,
        .llseek		= seq_lseek,
        .release	= tracing_release,
};

static struct file_operations show_traces_fops = {
        .open		= show_traces_open,
        .read		= seq_read,
        .release	= seq_release,
};
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char *buf;
        int r = 0;
        int len = 0;
        int i;

        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += strlen(trace_options[i]);
                len += 3; /* "no" and space */
        }

        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
                        r += sprintf(buf + r, "%s ", trace_options[i]);
                else
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }

        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    buf, r);

        kfree(buf);

        return r;
}
static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp = buf;
        int neg = 0;
        int i;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        if (strncmp(buf, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }

        for (i = 0; trace_options[i]; i++) {
                int len = strlen(trace_options[i]);

                if (strncmp(cmp, trace_options[i], len) == 0) {
                        if (neg)
                                trace_flags &= ~(1 << i);
                        else
                                trace_flags |= (1 << i);
                        break;
                }
        }

        filp->f_pos += cnt;

        return cnt;
}
static struct file_operations tracing_iter_fops = {
        .open		= tracing_open_generic,
        .read		= tracing_iter_ctrl_read,
        .write		= tracing_iter_ctrl_write,
};
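/*
 * Example session for the iter_ctrl file (paths assume debugfs is
 * mounted at /debugfs, as in the comments above; flag names come
 * from trace_options[], and with trace_flags starting at 0 every
 * option reads back with a "no" prefix):
 *
 *   # cat /debugfs/tracing/iter_ctrl
 *   noprint-parent nosym-offset nosym-addr noverbose
 *   # echo print-parent > /debugfs/tracing/iter_ctrl
 */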
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;

        r = sprintf(buf, "%ld\n", tr->ctrl);
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}
static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        long val;
        char buf[64];

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);
        val = !!val;

        mutex_lock(&trace_types_lock);
        if (tr->ctrl ^ val) {
                if (val)
                        tracer_enabled = 1;
                else
                        tracer_enabled = 0;

                tr->ctrl = val;

                if (current_trace && current_trace->ctrl_update)
                        current_trace->ctrl_update(tr);
        }
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char buf[max_tracer_type_len+2];
        int r;

        mutex_lock(&trace_types_lock);
        if (current_trace)
                r = sprintf(buf, "%s\n", current_trace->name);
        else
                r = sprintf(buf, "\n");
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = &global_trace;
        struct tracer *t;
        char buf[max_tracer_type_len+1];
        int i;

        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        /* strip ending whitespace. */
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
                        break;
        }
        if (!t || t == current_trace)
                goto out;

        if (current_trace && current_trace->reset)
                current_trace->reset(tr);

        current_trace = t;
        if (t->init)
                t->init(tr);

 out:
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}
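/*
 * Example of switching tracers at run time; the names shown are
 * whatever registered tracers appear in available_tracers and are
 * purely illustrative here:
 *
 *   # cat /debugfs/tracing/available_tracers
 *   function none
 *   # echo function > /debugfs/tracing/current_tracer
 */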
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, 64, "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        long *ptr = filp->private_data;
        long val;
        char buf[64];

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);

        *ptr = val * 1000;

        return cnt;
}
static struct file_operations tracing_max_lat_fops = {
        .open		= tracing_open_generic,
        .read		= tracing_max_lat_read,
        .write		= tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
        .open		= tracing_open_generic,
        .read		= tracing_ctrl_read,
        .write		= tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
        .open		= tracing_open_generic,
        .read		= tracing_set_trace_read,
        .write		= tracing_set_trace_write,
};
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        unsigned long *p = filp->private_data;
        char buf[64];
        int r;

        r = sprintf(buf, "%ld\n", *p);
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}

static struct file_operations tracing_read_long_fops = {
        .open		= tracing_open_generic,
        .read		= tracing_read_long,
};

#endif
static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
        static int once;

        if (d_tracer)
                return d_tracer;

        d_tracer = debugfs_create_dir("tracing", NULL);

        if (!d_tracer && !once) {
                once = 1;
                pr_warning("Could not create debugfs directory 'tracing'\n");
                return NULL;
        }

        return d_tracer;
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
static __init void tracer_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
                                    &global_trace, &tracing_ctrl_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

        entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

        entry = debugfs_create_file("latency_trace", 0444, d_tracer,
                                    &global_trace, &tracing_lt_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'latency_trace' entry\n");

        entry = debugfs_create_file("trace", 0444, d_tracer,
                                    &global_trace, &tracing_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'trace' entry\n");

        entry = debugfs_create_file("available_tracers", 0444, d_tracer,
                                    &global_trace, &show_traces_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'available_tracers' entry\n");

        entry = debugfs_create_file("current_tracer", 0444, d_tracer,
                                    &global_trace, &set_tracer_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'current_tracer' entry\n");

        entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
                                    &tracing_max_latency,
                                    &tracing_max_lat_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'tracing_max_latency' entry\n");

        entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
                                    &tracing_thresh, &tracing_max_lat_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
                                    &tracing_read_long_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");
#endif
}
/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
        .name		= "none",
};
static int trace_alloc_page(void)
{
        struct trace_array_cpu *data;
        void *array;
        struct page *page, *tmp;
        LIST_HEAD(pages);
        int i;

        /* first allocate a page for each CPU */
        for_each_possible_cpu(i) {
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page"
                               " for trace buffer!\n");
                        goto free_pages;
                }

                page = virt_to_page(array);
                list_add(&page->lru, &pages);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page"
                               " for trace buffer!\n");
                        goto free_pages;
                }
                page = virt_to_page(array);
                list_add(&page->lru, &pages);
#endif
        }

        /* Now that we successfully allocated a page per CPU, add them */
        for_each_possible_cpu(i) {
                data = global_trace.data[i];
                page = list_entry(pages.next, struct page, lru);
                list_del(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
                ClearPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
                page = list_entry(pages.next, struct page, lru);
                list_del(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
                SetPageLRU(page);
#endif
        }
        global_trace.entries += ENTRIES_PER_PAGE;

        return 0;

 free_pages:
        list_for_each_entry_safe(page, tmp, &pages, lru) {
                list_del(&page->lru);
                __free_page(page);
        }
        return -ENOMEM;
}
__init static int tracer_alloc_buffers(void)
{
        struct trace_array_cpu *data;
        void *array;
        struct page *page;
        int pages = 0;
        int ret = -ENOMEM;
        int i;

        /* Allocate the first page for all buffers */
        for_each_possible_cpu(i) {
                data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_data, i);

                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page"
                               " for trace buffer!\n");
                        goto free_buffers;
                }
                data->trace = array;

                /* set the array to the list */
                INIT_LIST_HEAD(&data->trace_pages);
                page = virt_to_page(array);
                list_add(&page->lru, &data->trace_pages);
                /* use the LRU flag to differentiate the two buffers */
                ClearPageLRU(page);

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page"
                               " for trace buffer!\n");
                        goto free_buffers;
                }
                max_tr.data[i]->trace = array;

                INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
                page = virt_to_page(array);
                list_add(&page->lru, &max_tr.data[i]->trace_pages);
                SetPageLRU(page);
#endif
        }

        /*
         * Since we allocate by orders of pages, we may be able to
         * round up a bit.
         */
        global_trace.entries = ENTRIES_PER_PAGE;
        pages++;

        while (global_trace.entries < trace_nr_entries) {
                if (trace_alloc_page())
                        break;
                pages++;
        }
        max_tr.entries = global_trace.entries;

        pr_info("tracer: %d pages allocated for %ld",
                pages, trace_nr_entries);
        pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);

        tracer_init_debugfs();

        trace_init_cmdlines();

        register_tracer(&no_tracer);
        current_trace = &no_tracer;

        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        return 0;

 free_buffers:
        for (i-- ; i >= 0; i--) {
                struct page *page, *tmp;
                struct trace_array_cpu *data = global_trace.data[i];

                if (data && data->trace) {
                        list_for_each_entry_safe(page, tmp,
                                                 &data->trace_pages, lru) {
                                list_del(&page->lru);
                                __free_page(page);
                        }
                        data->trace = NULL;
                }

#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
                if (data && data->trace) {
                        list_for_each_entry_safe(page, tmp,
                                                 &data->trace_pages, lru) {
                                list_del(&page->lru);
                                __free_page(page);
                        }
                        data->trace = NULL;
                }
#endif
        }
        return ret;
}

fs_initcall(tracer_alloc_buffers);