/*
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
#define TRACE_GRAPH_INDENT	2

/* Spaces between function call and time duration */
#define TRACE_GRAPH_TIMESPACE_ENTRY	" "
/* Spaces between function call and closing braces */
#define TRACE_GRAPH_TIMESPACE_RET	" "
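
/*
 * Rough shape of the output produced below (the function names are only
 * an illustration; exact column widths depend on the TIMESPACE strings):
 *
 *  CPU[000]   sys_read() { -
 *  CPU[000]     fget_light(); + 0.423 us
 *  CPU[000]   } + 2.917 us
 */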
#define TRACE_GRAPH_PRINT_OVERRUN	0x1

static struct tracer_opt trace_opts[] = {
	/* Display overruns or not */
	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
	.val  = 0, /* Don't display overruns by default */
	.opts = trace_opts
};
/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
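
/*
 * Selected as current_tracer: reset the per-cpu buffers, hook the
 * entry/return callbacks into the function graph infrastructure and
 * start recording pid <-> comm mappings.
 */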
static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\nCPU[%03d] "
				   " ------------8<---------- thread %s-%d"
				   " ------------8<----------\n\n",
				   cpu, comm, pid);
}
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		     struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];
	if (!ring_iter)
		return false;

	event = ring_buffer_iter_peek(ring_iter, NULL);
	if (!event)
		return false;

	next = ring_buffer_event_data(event);
	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}
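
/* Duration is stored in nanoseconds; print it as microseconds.remainder */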
static int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);

	return trace_seq_printf(s, "+ %llu.%lu us\n", duration, nsecs_rem);
}
/* Signal an overhead of time execution to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		       struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	/* Consume the matching return event that follows this entry */
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
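
/*
 * Case of a function entry with nested calls: only open the brace here,
 * the duration is printed later by the matching return event.
 */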
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	int ret;
	int i;

	/* No overhead */
	ret = trace_seq_printf(s, "  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No duration to print at this state */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY "-\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
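
/*
 * Entry event: print the pid-switch banner (if needed) and the cpu,
 * then emit either a folded leaf line or an opening brace.
 */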
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
		  struct trace_iterator *iter, int cpu)
{
	struct trace_entry *ent = iter->ent;
	int ret;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);

	return print_graph_entry_nested(field, s);
}
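
/*
 * Return event: close the brace at the call depth and print the duration
 * (plus the overrun counter when the "overrun" option is set).
 */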
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	int ret;
	int i;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "} ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_RET);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
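
/* Output callback: format one ring buffer entry for the trace file */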
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter, iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
static struct tracer graph_trace __read_mostly = {
	.name	    = "function_graph",
	.init	    = graph_trace_init,
	.reset	    = graph_trace_reset,
	.print_line = print_graph_function,
	.flags	    = &tracer_flags,
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
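
/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 *
 * The per-tracer "overrun" option defined above can be toggled through
 * the trace_options file to append the overrun counter to return lines.
 */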