tracing/function-graph-tracer: enhancements for the trace output
kernel/trace/trace_functions_graph.c
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT	2
/* Spaces between the function call and its time duration */
#define TRACE_GRAPH_TIMESPACE_ENTRY	"                    "
/* Spaces between the closing brace and its time duration */
#define TRACE_GRAPH_TIMESPACE_RET	"                               "

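/*
 * Illustrative output produced with the spacing above (thread name,
 * function names and timings are examples only, spacing is approximate):
 *
 * CPU[000]  ------------8<---------- thread bash-2794 ------------8<----------
 *
 * CPU[000]   sys_read() {                             -
 * CPU[000]     fget_light();                          + 1.237 us
 * CPU[000]   }                                        + 4.521 us
 */
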
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
static struct tracer_opt trace_opts[] = {
	/* Display overruns or not */
	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val = 0, /* Don't display overruns by default */
	.opts = trace_opts
};

/* pid of the last trace processed, per cpu */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
					&trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\nCPU[%03d] "
				    " ------------8<---------- thread %s-%d"
				    " ------------8<----------\n\n",
				    cpu, comm, pid);
}

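/*
 * A call entry is a "leaf" when the very next event on this cpu's ring
 * buffer is the matching return (same pid, same function).  Leaves are
 * printed on a single line with their duration instead of an open/close
 * brace pair.
 */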
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

	event = ring_buffer_iter_peek(ring_iter, NULL);

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}

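/*
 * Print a duration given in nanoseconds as microseconds: do_div() leaves
 * the usecs quotient in @duration and returns the nsecs remainder.
 */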
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	/* Zero-pad the remainder so that 1.005 us doesn't print as 1.5 us */
	unsigned long nsecs_rem = do_div(duration, 1000);
	return trace_seq_printf(s, "+ %llu.%03lu us\n", duration, nsecs_rem);
}

/* Signal an execution time overhead in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}

/*
 * Case of a leaf function at its call entry: consume the matching return
 * event and print the whole call on one line with its duration.
 */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;
	int ret;

	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
	ret = trace_seq_printf(s, "  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No duration to print yet, the function hasn't returned */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_ENTRY "-\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

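/*
 * Entry handler: print the pid-switch banner and the cpu column, then
 * hand off to the leaf or nested printer depending on whether the
 * matching return immediately follows in the ring buffer.
 */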
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s);
}

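/*
 * Return handler: print the closing brace at the call depth, the duration
 * and, when the "overrun" option is set, the overrun count.
 */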
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "} ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	ret = trace_seq_printf(s, TRACE_GRAPH_TIMESPACE_RET);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

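/*
 * Main print callback, hooked up as ->print_line below: dispatch each
 * graph event to the entry or return printer.
 */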
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter, iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}

static struct tracer graph_trace __read_mostly = {
	.name	    = "function_graph",
	.init	    = graph_trace_init,
	.reset	    = graph_trace_reset,
	.print_line = print_graph_function,
	.flags	    = &tracer_flags,
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);