tracing/function-graph-tracer: adjustments of the trace information
[linux-2.6] / kernel / trace / trace_functions_graph.c
1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/fs.h>
13
14 #include "trace.h"
15
/* Number of spaces added per level of call-depth nesting in the output */
#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1     /* show return-stack overrun count */
#define TRACE_GRAPH_PRINT_CPU           0x2     /* show the cpu column */
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4     /* show the "!"/"+" duration markers */
22
/* Per-tracer options exposed under the debugfs "trace_options" interface */
static struct tracer_opt trace_opts[] = {
        /* Display overruns ? */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        { } /* Empty entry */
};
32
static struct tracer_flags tracer_flags = {
        /*
         * Defaults: cpu column and overhead markers on, overruns off
         * (hence "don't display overruns by default").
         */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
        .opts = trace_opts
};
38
/* pid on the last trace processed */
/*
 * NOTE(review): written from verif_pid() at *output* time, one slot per
 * cpu; no locking is visible here, so output is presumably serialized
 * by the trace reader — confirm against the iterator code.
 */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
41
42 static int graph_trace_init(struct trace_array *tr)
43 {
44         int cpu, ret;
45
46         for_each_online_cpu(cpu)
47                 tracing_reset(tr, cpu);
48
49         ret = register_ftrace_graph(&trace_graph_return,
50                                         &trace_graph_entry);
51         if (ret)
52                 return ret;
53         tracing_start_cmdline_record();
54
55         return 0;
56 }
57
/*
 * Deactivate the tracer: stop cmdline recording first, then unhook the
 * graph callbacks (reverse order of graph_trace_init()).
 */
static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}
63
/*
 * Number of decimal digits needed to print @nb (i.e. floor(log10)+1,
 * with 0 counting as one digit). Used to right-align cpu numbers.
 *
 * Generalized from the original hard-coded 1/2/3 ladder, which capped
 * out at 3 for any nb >= 1000; behavior is unchanged for 0..999.
 */
static inline int log10_cpu(int nb)
{
	int digits = 1;

	while (nb >= 10) {
		nb /= 10;
		digits++;
	}
	return digits;
}
72
73 static enum print_line_t
74 print_graph_cpu(struct trace_seq *s, int cpu)
75 {
76         int i;
77         int ret;
78         int log10_this = log10_cpu(cpu);
79         int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
80
81
82         for (i = 0; i < log10_all - log10_this; i++) {
83                 ret = trace_seq_printf(s, " ");
84                 if (!ret)
85                         return TRACE_TYPE_PARTIAL_LINE;
86         }
87         ret = trace_seq_printf(s, "%d) ", cpu);
88         if (!ret)
89                         return TRACE_TYPE_PARTIAL_LINE;
90         return TRACE_TYPE_HANDLED;
91 }
92
93
94 /* If the pid changed since the last trace, output this event */
95 static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
96 {
97         char *comm;
98
99         if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
100                 return 1;
101
102         last_pid[cpu] = pid;
103         comm = trace_find_cmdline(pid);
104
105         return trace_seq_printf(s, "\n------------8<---------- thread %s-%d"
106                                     " ------------8<----------\n\n",
107                                     cpu, comm, pid);
108 }
109
110 static bool
111 trace_branch_is_leaf(struct trace_iterator *iter,
112                 struct ftrace_graph_ent_entry *curr)
113 {
114         struct ring_buffer_iter *ring_iter;
115         struct ring_buffer_event *event;
116         struct ftrace_graph_ret_entry *next;
117
118         ring_iter = iter->buffer_iter[iter->cpu];
119
120         if (!ring_iter)
121                 return false;
122
123         event = ring_buffer_iter_peek(ring_iter, NULL);
124
125         if (!event)
126                 return false;
127
128         next = ring_buffer_event_data(event);
129
130         if (next->ent.type != TRACE_GRAPH_RET)
131                 return false;
132
133         if (curr->ent.pid != next->ent.pid ||
134                         curr->graph_ent.func != next->ret.func)
135                 return false;
136
137         return true;
138 }
139
140
/*
 * Print a duration as "nnnn.nnn us | ". @duration is split in place by
 * do_div(): quotient = microseconds, remainder = leftover nanoseconds.
 */
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);

	/*
	 * Fix: the fractional part must be zero-padded ("%03lu"); with
	 * the old "%3lu", 4 us + 5 ns printed as "4.  5 us" and read
	 * as 4.5 us.
	 */
	return trace_seq_printf(s, "%4llu.%03lu us | ", duration, nsecs_rem);
}
147
/* Signal an overhead of time execution to the output */
/*
 * NOTE(review): @duration here is the raw rettime - calltime value
 * (presumably nanoseconds — confirm against the trace clock), so
 * these thresholds are 100 usecs and 10 usecs, not "msecs" as the
 * original comments claimed.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* Duration exceeded 100 usecs */
        if (duration > 100000ULL)
                return trace_seq_printf(s, "! ");

        /* Duration exceeded 10 usecs */
        if (duration > 10000ULL)
                return trace_seq_printf(s, "+ ");

        return trace_seq_printf(s, "  ");
}
162
163 /* Case of a leaf function on its call entry */
164 static enum print_line_t
165 print_graph_entry_leaf(struct trace_iterator *iter,
166                 struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
167 {
168         struct ftrace_graph_ret_entry *ret_entry;
169         struct ftrace_graph_ret *graph_ret;
170         struct ring_buffer_event *event;
171         struct ftrace_graph_ent *call;
172         unsigned long long duration;
173         int ret;
174         int i;
175
176         event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
177         ret_entry = ring_buffer_event_data(event);
178         graph_ret = &ret_entry->ret;
179         call = &entry->graph_ent;
180         duration = graph_ret->rettime - graph_ret->calltime;
181
182         /* Must not exceed 8 characters: 9999.999 us */
183         if (duration > 10000000ULL)
184                 duration = 9999999ULL;
185
186         /* Overhead */
187         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
188                 ret = print_graph_overhead(duration, s);
189                 if (!ret)
190                         return TRACE_TYPE_PARTIAL_LINE;
191         }
192
193         /* Duration */
194         ret = print_graph_duration(duration, s);
195         if (!ret)
196                 return TRACE_TYPE_PARTIAL_LINE;
197
198         /* Function */
199         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
200                 ret = trace_seq_printf(s, " ");
201                 if (!ret)
202                         return TRACE_TYPE_PARTIAL_LINE;
203         }
204
205         ret = seq_print_ip_sym(s, call->func, 0);
206         if (!ret)
207                 return TRACE_TYPE_PARTIAL_LINE;
208
209         ret = trace_seq_printf(s, "();\n");
210         if (!ret)
211                 return TRACE_TYPE_PARTIAL_LINE;
212
213         return TRACE_TYPE_HANDLED;
214 }
215
216 static enum print_line_t
217 print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
218                         struct trace_seq *s)
219 {
220         int i;
221         int ret;
222         struct ftrace_graph_ent *call = &entry->graph_ent;
223
224         /* No overhead */
225         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
226                 ret = trace_seq_printf(s, "  ");
227                 if (!ret)
228                         return TRACE_TYPE_PARTIAL_LINE;
229         }
230
231         /* No time */
232         ret = trace_seq_printf(s, "        |     ");
233
234         /* Function */
235         for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
236                 ret = trace_seq_printf(s, " ");
237                 if (!ret)
238                         return TRACE_TYPE_PARTIAL_LINE;
239         }
240
241         ret = seq_print_ip_sym(s, call->func, 0);
242         if (!ret)
243                 return TRACE_TYPE_PARTIAL_LINE;
244
245         ret = trace_seq_printf(s, "() {\n");
246         if (!ret)
247                 return TRACE_TYPE_PARTIAL_LINE;
248
249         return TRACE_TYPE_HANDLED;
250 }
251
252 static enum print_line_t
253 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
254                         struct trace_iterator *iter, int cpu)
255 {
256         int ret;
257         struct trace_entry *ent = iter->ent;
258
259         /* Pid */
260         if (!verif_pid(s, ent->pid, cpu))
261                 return TRACE_TYPE_PARTIAL_LINE;
262
263         /* Cpu */
264         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
265                 ret = print_graph_cpu(s, cpu);
266                 if (!ret)
267                         return TRACE_TYPE_PARTIAL_LINE;
268         }
269
270         if (trace_branch_is_leaf(iter, field))
271                 return print_graph_entry_leaf(iter, field, s);
272         else
273                 return print_graph_entry_nested(field, s);
274
275 }
276
277 static enum print_line_t
278 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
279                    struct trace_entry *ent, int cpu)
280 {
281         int i;
282         int ret;
283         unsigned long long duration = trace->rettime - trace->calltime;
284
285         /* Must not exceed 8 characters: xxxx.yyy us */
286         if (duration > 10000000ULL)
287                 duration = 9999999ULL;
288
289         /* Pid */
290         if (!verif_pid(s, ent->pid, cpu))
291                 return TRACE_TYPE_PARTIAL_LINE;
292
293         /* Cpu */
294         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
295                 ret = print_graph_cpu(s, cpu);
296                 if (!ret)
297                         return TRACE_TYPE_PARTIAL_LINE;
298         }
299
300         /* Overhead */
301         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
302                 ret = print_graph_overhead(duration, s);
303                 if (!ret)
304                         return TRACE_TYPE_PARTIAL_LINE;
305         }
306
307         /* Duration */
308         ret = print_graph_duration(duration, s);
309         if (!ret)
310                 return TRACE_TYPE_PARTIAL_LINE;
311
312         /* Closing brace */
313         for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
314                 ret = trace_seq_printf(s, " ");
315                 if (!ret)
316                         return TRACE_TYPE_PARTIAL_LINE;
317         }
318
319         ret = trace_seq_printf(s, "}\n");
320         if (!ret)
321                 return TRACE_TYPE_PARTIAL_LINE;
322
323         /* Overrun */
324         if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
325                 ret = trace_seq_printf(s, " (Overruns: %lu)\n",
326                                         trace->overrun);
327                 if (!ret)
328                         return TRACE_TYPE_PARTIAL_LINE;
329         }
330         return TRACE_TYPE_HANDLED;
331 }
332
333 enum print_line_t
334 print_graph_function(struct trace_iterator *iter)
335 {
336         struct trace_seq *s = &iter->seq;
337         struct trace_entry *entry = iter->ent;
338
339         switch (entry->type) {
340         case TRACE_GRAPH_ENT: {
341                 struct ftrace_graph_ent_entry *field;
342                 trace_assign_type(field, entry);
343                 return print_graph_entry(field, s, iter,
344                                          iter->cpu);
345         }
346         case TRACE_GRAPH_RET: {
347                 struct ftrace_graph_ret_entry *field;
348                 trace_assign_type(field, entry);
349                 return print_graph_return(&field->ret, s, entry, iter->cpu);
350         }
351         default:
352                 return TRACE_TYPE_UNHANDLED;
353         }
354 }
355
356 static struct tracer graph_trace __read_mostly = {
357         .name        = "function_graph",
358         .init        = graph_trace_init,
359         .reset       = graph_trace_reset,
360         .print_line = print_graph_function,
361         .flags          = &tracer_flags,
362 };
363
/* Register the tracer at boot; __init, the code is discarded afterwards */
static __init int init_graph_trace(void)
{
        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);