3 * Function graph tracer.
4 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
16 #define TRACE_GRAPH_INDENT 2
/* Bit flags stored in tracer_flags.val — each selects an optional output column */
19 #define TRACE_GRAPH_PRINT_OVERRUN 0x1
20 #define TRACE_GRAPH_PRINT_CPU 0x2
21 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
23 static struct tracer_opt trace_opts[] = {
24 /* Display overruns ? */
25 { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
/* Display the CPU column ? */
27 { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
28 /* Display Overhead ? */
29 { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
/* NOTE(review): the terminating entry and closing "};" appear elided from this excerpt */
33 static struct tracer_flags tracer_flags = {
34 /* Don't display overruns by default */
/* Defaults: CPU column and overhead markers on, overrun count off */
35 .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
/* NOTE(review): the .opts = trace_opts member and closing "};" appear elided here */
39 /* pid on the last trace processed */
/* Per-cpu pid of the last event printed; -1 means "none yet". Used by
 * verif_pid() to detect a context switch and emit a thread banner. */
40 static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
42 static int graph_trace_init(struct trace_array *tr)
/*
 * Tracer init callback: reset each online cpu's trace buffer, register
 * the graph tracing hook(s) and start recording task comms for pid
 * resolution.
 * NOTE(review): the opening brace, local declarations (ret, cpu), the
 * second argument of register_ftrace_graph() and the error/return paths
 * appear elided from this excerpt.
 */
46 for_each_online_cpu(cpu)
47 tracing_reset(tr, cpu);
49 ret = register_ftrace_graph(&trace_graph_return,
53 tracing_start_cmdline_record();
58 static void graph_trace_reset(struct trace_array *tr)
/* Tracer teardown: stop comm recording and unhook the graph tracer —
 * mirrors graph_trace_init() in reverse order. */
60 tracing_stop_cmdline_record();
61 unregister_ftrace_graph();
64 static inline int log10_cpu(int nb)
/* Decimal-digit width of a cpu number; used by print_graph_cpu() to
 * right-align the cpu column. NOTE(review): body elided from this excerpt. */
73 static enum print_line_t
74 print_graph_cpu(struct trace_seq *s, int cpu)
/*
 * Emit the cpu column as "%d) ", left-padded with spaces so that all
 * cpu numbers line up to the width of the widest online cpu.
 * Returns TRACE_TYPE_PARTIAL_LINE if the trace_seq buffer fills up.
 */
78 int log10_this = log10_cpu(cpu);
/* NOTE(review): width is derived from the *count* of online cpus, not the
 * highest cpu id — sparse cpu numbering could misalign; confirm intent. */
79 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
/* pad to the common width */
82 for (i = 0; i < log10_all - log10_this; i++) {
83 ret = trace_seq_printf(s, " ");
85 return TRACE_TYPE_PARTIAL_LINE;
87 ret = trace_seq_printf(s, "%d) ", cpu);
89 return TRACE_TYPE_PARTIAL_LINE;
90 return TRACE_TYPE_HANDLED;
94 /* If the pid changed since the last trace, output this event */
95 static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
/* Fast path: same task as the previous event on this cpu — no banner. */
99 if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
/* Context switch: resolve the new task's comm and print a
 * "------8<------ thread comm-pid ------8<------" separator.
 * Returns the trace_seq_printf() result (0 on buffer-full). */
103 comm = trace_find_cmdline(pid);
105 return trace_seq_printf(s, "\n------------8<---------- thread %s-%d"
106 " ------------8<----------\n\n",
/* NOTE(review): the early "return 1", the last_pid[cpu] update and the
 * printf arguments appear elided from this excerpt. */
/*
 * Decide whether 'curr' is a leaf call: true only when the very next
 * event in this cpu's ring buffer is the matching GRAPH_RET for the same
 * pid and function (i.e. the function made no traced sub-calls).
 * NOTE(review): the "return 0/1" lines appear elided from this excerpt.
 */
111 trace_branch_is_leaf(struct trace_iterator *iter,
112 struct ftrace_graph_ent_entry *curr)
114 struct ring_buffer_iter *ring_iter;
115 struct ring_buffer_event *event;
116 struct ftrace_graph_ret_entry *next;
118 ring_iter = iter->buffer_iter[iter->cpu];
/* peek, do not consume — the RET event is read later by
 * print_graph_entry_leaf() if we report a leaf */
123 event = ring_buffer_iter_peek(ring_iter, NULL);
128 next = ring_buffer_event_data(event);
130 if (next->ent.type != TRACE_GRAPH_RET)
/* must be the return of *this* call, in the same task */
133 if (curr->ent.pid != next->ent.pid ||
134 curr->graph_ent.func != next->ret.func)
/*
 * Print a call duration as "uuuu.nnn us | ".
 * 'duration' is in nanoseconds; do_div() leaves the microsecond part in
 * 'duration' and returns the nanosecond remainder.
 * Returns the trace_seq_printf() result (0 when the seq buffer is full).
 */
static int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);

	/*
	 * Zero-pad the fractional part: with "%3lu" a remainder of 2 ns
	 * printed as "  2", so 707002 ns rendered as "707.  2 us" instead
	 * of the correct "707.002 us".
	 */
	return trace_seq_printf(s, "%4llu.%03lu us | ", duration, nsecs_rem);
}
148 /* Signal an overhead of time execution to the output */
/* Prints a one-character marker column: "! " for very slow calls,
 * "+ " for slow calls, "  " otherwise. 'duration' is in nanoseconds. */
150 print_graph_overhead(unsigned long long duration, struct trace_seq *s)
152 /* Duration exceeded 100 usecs (100000 ns) */
153 if (duration > 100000ULL)
154 return trace_seq_printf(s, "! ");
156 /* Duration exceeded 10 usecs (10000 ns) */
157 if (duration > 10000ULL)
158 return trace_seq_printf(s, "+ ");
160 return trace_seq_printf(s, " ");
163 /* Case of a leaf function on its call entry */
164 static enum print_line_t
165 print_graph_entry_leaf(struct trace_iterator *iter,
166 struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
/*
 * Collapse a leaf call into a single "func();" line carrying its
 * duration. Consumes the paired GRAPH_RET event from the ring buffer
 * (ring_buffer_read advances the iterator, unlike the peek done in
 * trace_branch_is_leaf()).
 */
168 struct ftrace_graph_ret_entry *ret_entry;
169 struct ftrace_graph_ret *graph_ret;
170 struct ring_buffer_event *event;
171 struct ftrace_graph_ent *call;
172 unsigned long long duration;
176 event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
177 ret_entry = ring_buffer_event_data(event);
178 graph_ret = &ret_entry->ret;
179 call = &entry->graph_ent;
180 duration = graph_ret->rettime - graph_ret->calltime;
182 /* Must not exceed 8 characters: 9999.999 us */
183 if (duration > 10000000ULL)
184 duration = 9999999ULL;
/* Optional overhead marker column ("! ", "+ " or "  ") */
187 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
188 ret = print_graph_overhead(duration, s);
190 return TRACE_TYPE_PARTIAL_LINE;
194 ret = print_graph_duration(duration, s);
196 return TRACE_TYPE_PARTIAL_LINE;
/* Indent proportional to call depth */
199 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
200 ret = trace_seq_printf(s, " ");
202 return TRACE_TYPE_PARTIAL_LINE;
205 ret = seq_print_ip_sym(s, call->func, 0);
207 return TRACE_TYPE_PARTIAL_LINE;
/* Leaf: entry and exit merged into "func();" */
209 ret = trace_seq_printf(s, "();\n");
211 return TRACE_TYPE_PARTIAL_LINE;
213 return TRACE_TYPE_HANDLED;
216 static enum print_line_t
217 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
/*
 * Entry of a non-leaf call: print "func() {" — the duration and closing
 * "}" are printed later by print_graph_return(). The overhead and
 * duration columns are filled with placeholders so lines stay aligned.
 */
222 struct ftrace_graph_ent *call = &entry->graph_ent;
/* blank placeholder where the overhead marker would be */
225 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
226 ret = trace_seq_printf(s, " ");
228 return TRACE_TYPE_PARTIAL_LINE;
/* placeholder for the duration column */
232 ret = trace_seq_printf(s, " | ");
/* Indent proportional to call depth */
235 for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
236 ret = trace_seq_printf(s, " ");
238 return TRACE_TYPE_PARTIAL_LINE;
241 ret = seq_print_ip_sym(s, call->func, 0);
243 return TRACE_TYPE_PARTIAL_LINE;
245 ret = trace_seq_printf(s, "() {\n");
247 return TRACE_TYPE_PARTIAL_LINE;
249 return TRACE_TYPE_HANDLED;
252 static enum print_line_t
253 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
254 struct trace_iterator *iter, int cpu)
/*
 * Print one function-entry event: emit the per-task banner and cpu
 * column if needed, then dispatch to the leaf (merged "func();") or
 * nested ("func() {") form.
 */
257 struct trace_entry *ent = iter->ent;
/* Task-switch banner; 0 means the seq buffer filled up */
260 if (!verif_pid(s, ent->pid, cpu))
261 return TRACE_TYPE_PARTIAL_LINE;
264 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
265 ret = print_graph_cpu(s, cpu);
267 return TRACE_TYPE_PARTIAL_LINE;
270 if (trace_branch_is_leaf(iter, field))
271 return print_graph_entry_leaf(iter, field, s);
273 return print_graph_entry_nested(field, s);
277 static enum print_line_t
278 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
279 struct trace_entry *ent, int cpu)
/*
 * Print one function-return event: the closing "}" of a nested call,
 * with its duration and (optionally) the stack overrun counter.
 */
283 unsigned long long duration = trace->rettime - trace->calltime;
285 /* Must not exceed 8 characters: xxxx.yyy us */
286 if (duration > 10000000ULL)
287 duration = 9999999ULL;
/* Task-switch banner; 0 means the seq buffer filled up */
290 if (!verif_pid(s, ent->pid, cpu))
291 return TRACE_TYPE_PARTIAL_LINE;
294 if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
295 ret = print_graph_cpu(s, cpu);
297 return TRACE_TYPE_PARTIAL_LINE;
/* Overhead marker column ("! ", "+ " or "  ") */
301 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
302 ret = print_graph_overhead(duration, s);
304 return TRACE_TYPE_PARTIAL_LINE;
308 ret = print_graph_duration(duration, s);
310 return TRACE_TYPE_PARTIAL_LINE;
/* Indent proportional to call depth, matching the entry line */
313 for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
314 ret = trace_seq_printf(s, " ");
316 return TRACE_TYPE_PARTIAL_LINE;
319 ret = trace_seq_printf(s, "}\n");
321 return TRACE_TYPE_PARTIAL_LINE;
/* Optional: number of events lost (ring-buffer overruns) */
324 if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
325 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
328 return TRACE_TYPE_PARTIAL_LINE;
330 return TRACE_TYPE_HANDLED;
/*
 * Top-level .print_line callback: dispatch one trace entry to the
 * entry or return printer; anything else is left to the core tracer.
 * NOTE(review): the return type line and some closing braces appear
 * elided from this excerpt.
 */
334 print_graph_function(struct trace_iterator *iter)
336 struct trace_seq *s = &iter->seq;
337 struct trace_entry *entry = iter->ent;
339 switch (entry->type) {
340 case TRACE_GRAPH_ENT: {
341 struct ftrace_graph_ent_entry *field;
342 trace_assign_type(field, entry);
343 return print_graph_entry(field, s, iter,
346 case TRACE_GRAPH_RET: {
347 struct ftrace_graph_ret_entry *field;
348 trace_assign_type(field, entry);
349 return print_graph_return(&field->ret, s, entry, iter->cpu);
/* any other entry type: let the default printers handle it */
352 return TRACE_TYPE_UNHANDLED;
/* Tracer descriptor registered with the ftrace core; selected by
 * writing "function_graph" to the current_tracer file. */
356 static struct tracer graph_trace __read_mostly = {
357 .name = "function_graph",
358 .init = graph_trace_init,
359 .reset = graph_trace_reset,
360 .print_line = print_graph_function,
361 .flags = &tracer_flags,
364 static __init int init_graph_trace(void)
/* Register this tracer with the ftrace core at device-init time. */
366 return register_tracer(&graph_trace);
369 device_initcall(init_graph_trace);