function-graph: show binary events as comments
[linux-2.6] / kernel / trace / trace_functions_graph.c
1 /*
2  *
3  * Function graph tracer.
4  * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5  * Mostly borrowed from function tracer which
6  * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7  *
8  */
9 #include <linux/debugfs.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
12 #include <linux/fs.h>
13
14 #include "trace.h"
15 #include "trace_output.h"
16
/*
 * Per-cpu output state, hung off iter->private by graph_trace_open().
 */
struct fgraph_data {
        pid_t           last_pid;       /* pid of the last entry printed on this cpu */
        int             depth;          /* call depth used to indent comment lines */
};
21
/* Spaces of indentation added per call-depth level */
#define TRACE_GRAPH_INDENT      2

/* Flag options (bits stored in tracer_flags.val) */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0X20
/* User-togglable options exposed through the trace_options interface */
static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        { } /* Empty entry */
};
47
static struct tracer_flags tracer_flags = {
        /* Default: cpu, overhead and duration on; overrun, proc, abstime off */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION,
        .opts = trace_opts
};
54
55 /* pid on the last trace processed */
56
57
/*
 * Add a function return address to the trace stack on thread info.
 *
 * @ret:   original return address being hijacked
 * @time:  entry timestamp, stored as calltime for duration accounting
 * @func:  address of the traced function
 * @depth: out parameter, receives the stack index (== call depth)
 *
 * Returns 0 on success, -EBUSY if the task has no ret_stack or the
 * stack is full (in which case the overrun counter is incremented).
 */
int
ftrace_push_return_trace(unsigned long ret, unsigned long long time,
                         unsigned long func, int *depth)
{
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        index = ++current->curr_ret_stack;
        /* compiler barrier: publish the new index before filling the slot */
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = time;
        *depth = index;

        return 0;
}
83
/*
 * Retrieve a function return address to the trace stack on thread info.
 *
 * Pops the top frame into @trace (func, calltime, overrun, depth) and
 * stores the original return address in @ret.  On stack underflow the
 * tracer is stopped and @ret is pointed at panic() so the corrupted
 * return path fails loudly instead of jumping to garbage.
 */
void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have no where to go */
                *ret = (unsigned long)panic;
                return;
        }

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
        /* compiler barrier: read the slot fully before releasing it */
        barrier();
        current->curr_ret_stack--;

}
109
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 *
 * Called from the return trampoline: pops the saved frame, timestamps
 * the return, hands the record to ftrace_graph_return(), then gives
 * back the real return address to jump to.  A zero address means the
 * shadow stack is corrupt; stop tracing and fail into panic().
 */
unsigned long ftrace_return_to_handler(void)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
132
133 static int graph_trace_init(struct trace_array *tr)
134 {
135         int ret = register_ftrace_graph(&trace_graph_return,
136                                         &trace_graph_entry);
137         if (ret)
138                 return ret;
139         tracing_start_cmdline_record();
140
141         return 0;
142 }
143
/* Tracer reset hook: undo graph_trace_init() in reverse order. */
static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}
149
/*
 * Number of decimal digits needed to print @nb (so log10 + 1, with 0
 * counting as one digit).  Used to right-align the CPU column.
 *
 * The old version capped the answer at 3, which misaligned the column
 * on systems with 1000 or more CPUs; count digits generically instead.
 */
static inline int log10_cpu(int nb)
{
        int digits = 1;

        while (nb /= 10)
                digits++;
        return digits;
}
158
159 static enum print_line_t
160 print_graph_cpu(struct trace_seq *s, int cpu)
161 {
162         int i;
163         int ret;
164         int log10_this = log10_cpu(cpu);
165         int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
166
167
168         /*
169          * Start with a space character - to make it stand out
170          * to the right a bit when trace output is pasted into
171          * email:
172          */
173         ret = trace_seq_printf(s, " ");
174
175         /*
176          * Tricky - we space the CPU field according to the max
177          * number of online CPUs. On a 2-cpu system it would take
178          * a maximum of 1 digit - on a 128 cpu system it would
179          * take up to 3 digits:
180          */
181         for (i = 0; i < log10_all - log10_this; i++) {
182                 ret = trace_seq_printf(s, " ");
183                 if (!ret)
184                         return TRACE_TYPE_PARTIAL_LINE;
185         }
186         ret = trace_seq_printf(s, "%d) ", cpu);
187         if (!ret)
188                 return TRACE_TYPE_PARTIAL_LINE;
189
190         return TRACE_TYPE_HANDLED;
191 }
192
/* Total width of the "comm-pid" field, padded with spaces on both sides */
#define TRACE_GRAPH_PROCINFO_LENGTH     14

/*
 * Emit a centered "comm-pid" field of TRACE_GRAPH_PROCINFO_LENGTH
 * characters into @s.  The comm is truncated to 7 characters so the
 * field fits.  Returns TRACE_TYPE_PARTIAL_LINE if the seq fills up.
 */
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        /* truncate the comm to at most 7 characters */
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
235
236
/*
 * If the pid changed since the last trace, output this event.
 *
 * Compares @pid against the per-cpu last_pid in @data and, on a
 * change, prints a boxed "prev => next" context-switch banner.  The
 * very first event on a cpu (last_pid == -1, set by graph_trace_open)
 * prints nothing.  With no per-cpu data the check is skipped entirely.
 */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        /* first event on this cpu: nothing to switch from */
        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
294
/*
 * Look ahead in the ring buffer: if the event right after @curr is the
 * matching TRACE_GRAPH_RET for the same pid and function, @curr is a
 * leaf call and the return entry is returned (and consumed) so both
 * can be folded into a single "func();" line.  Otherwise NULL.
 *
 * NOTE(review): in the non-iterator path the current entry is consumed
 * before peeking even when the peek then fails or mismatches — the
 * callers are expected to cope with that (see TRACE_TYPE_NO_CONSUME in
 * print_graph_entry_nested).
 */
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        /* First peek to compare current entry and the next one */
        if (ring_iter)
                event = ring_buffer_iter_peek(ring_iter, NULL);
        else {
        /* We need to consume the current entry to see the next one */
                ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
                event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                        NULL);
        }

        if (!event)
                return NULL;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        /* the return must belong to the same task and function */
        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
333
/*
 * Signal an overhead of time execution to the output: "! " for > 100us... 
 * actually "! " when duration exceeds 100000 (see below), "+ " for the
 * lower threshold, two spaces otherwise.
 *
 * @duration == (unsigned long long)-1 is the sentinel for "no duration
 * on this line" (function entries and non-leaf returns).
 * Returns the trace_seq_printf() result (0 means the seq is full).
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* If duration disappear, we don't need anything */
        if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 msecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 msecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}
358
/*
 * Print the absolute timestamp @t (nanoseconds) as "seconds.microsecs".
 * do_div() leaves the quotient (seconds) in t and returns the
 * remainder in nanoseconds, which is then scaled to microseconds.
 */
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                        (unsigned long)t, usecs_rem);
}
369
/*
 * If @addr lies in the irq-entry text section, print an interrupt
 * marker line ("==========>" on entry, "<==========" on return) with
 * the same optional columns as a normal trace line.  Returns
 * TRACE_TYPE_UNHANDLED when @addr is not an irq entry point.
 */
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if haven't one */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}
426
/*
 * Print @duration (nanoseconds) as "usecs.nsecs us" padded to a fixed
 * 7-digit column followed by "|  ".  do_div() converts duration to
 * microseconds in place and yields the nanosecond remainder.
 */
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print msecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 numbers) */
        if (len < 7) {
                /* truncate the fraction so total digits stay <= 7 */
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;

}
472
/*
 * Case of a leaf function on its call entry: the entry and its return
 * were adjacent in the buffer (see get_return_for_leaf), so emit a
 * single "func();" line carrying the measured duration.
 */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                *depth = call->depth - 1;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function: indent by call depth, then "func();" */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "();\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
531
/*
 * Non-leaf entry: open a "func() {" scope.  The matching "}" is
 * printed later by print_graph_return().  No duration is known yet,
 * so the duration column is filled with spaces.
 */
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                *depth = call->depth;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function: indent by call depth, then "func() {" */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "() {\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}
582
/*
 * Emit the common line prefix: pid-change banner, optional irq marker
 * (when @type != 0), then the abstime/cpu/proc columns per the flags.
 *
 * Returns 0 on success; failure paths return TRACE_TYPE_PARTIAL_LINE.
 * NOTE(review): callers test the result for non-zero, so if
 * TRACE_TYPE_PARTIAL_LINE is defined as 0 in enum print_line_t a
 * failure here is indistinguishable from success — verify the enum
 * values in trace.h and return a distinct non-zero code if so.
 */
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}
630
631 static enum print_line_t
632 print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
633                         struct trace_iterator *iter)
634 {
635         int cpu = iter->cpu;
636         struct ftrace_graph_ent *call = &field->graph_ent;
637         struct ftrace_graph_ret_entry *leaf_ret;
638
639         if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
640                 return TRACE_TYPE_PARTIAL_LINE;
641
642         leaf_ret = get_return_for_leaf(iter, field);
643         if (leaf_ret)
644                 return print_graph_entry_leaf(iter, field, leaf_ret, s);
645         else
646                 return print_graph_entry_nested(iter, field, s, cpu);
647
648 }
649
/*
 * Render a function-return event: close the "}" opened by the nested
 * entry, with overhead/duration columns, optional overrun count, and
 * an irq-return marker when the function was an irq entry point.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int ret;
        int i;

        if (data) {
                int cpu = iter->cpu;
                int *depth = &(per_cpu_ptr(data, cpu)->depth);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                *depth = trace->depth - 1;
        }

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
713
/*
 * Render any non-graph event (bprintk/printk messages, and binary
 * events via their registered trace_event handler) as a C-style
 * "/* ... *​/" comment, indented one level deeper than the current
 * function scope.
 */
static enum print_line_t
print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
                    struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation: one level deeper than the enclosing function */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                /* binary events: delegate to the event's own formatter */
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->trace(iter, sym_flags);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline so " */" sits on the same line */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
789
790
/*
 * Top-level print_line hook: dispatch graph entries and returns to
 * their renderers; everything else becomes an inline comment.
 */
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                struct ftrace_graph_ent_entry *field;
                trace_assign_type(field, entry);
                return print_graph_entry(field, s, iter);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter);
        }
        default:
                return print_graph_comment(s, entry, iter);
        }

        /* not reached: every switch arm returns */
        return TRACE_TYPE_HANDLED;
}
814
815 static void print_graph_headers(struct seq_file *s)
816 {
817         /* 1st line */
818         seq_printf(s, "# ");
819         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
820                 seq_printf(s, "     TIME       ");
821         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
822                 seq_printf(s, "CPU");
823         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
824                 seq_printf(s, "  TASK/PID      ");
825         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
826                 seq_printf(s, "  DURATION   ");
827         seq_printf(s, "               FUNCTION CALLS\n");
828
829         /* 2nd line */
830         seq_printf(s, "# ");
831         if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME)
832                 seq_printf(s, "      |         ");
833         if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
834                 seq_printf(s, "|  ");
835         if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
836                 seq_printf(s, "  |    |        ");
837         if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
838                 seq_printf(s, "   |   |      ");
839         seq_printf(s, "               |   |   |   |\n");
840 }
841
842 static void graph_trace_open(struct trace_iterator *iter)
843 {
844         /* pid and depth on the last trace processed */
845         struct fgraph_data *data = alloc_percpu(struct fgraph_data);
846         int cpu;
847
848         if (!data)
849                 pr_warning("function graph tracer: not enough memory\n");
850         else
851                 for_each_possible_cpu(cpu) {
852                         pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
853                         int *depth = &(per_cpu_ptr(data, cpu)->depth);
854                         *pid = -1;
855                         *depth = 0;
856                 }
857
858         iter->private = data;
859 }
860
/* Iterator close hook: release the per-cpu data from graph_trace_open(). */
static void graph_trace_close(struct trace_iterator *iter)
{
        free_percpu(iter->private);
}
865
/* The function_graph tracer, wired into the generic tracer core. */
static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .close          = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};
880
/* Register the tracer at device_initcall time. */
static __init int init_graph_trace(void)
{
        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);