/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8

static struct tracer_opt trace_opts[] = {
        /* Display overruns? */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
        .opts = trace_opts
};

/* pid of the last trace processed on each cpu */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long long time,
                         unsigned long func, int *depth)
{
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        index = ++current->curr_ret_stack;
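        /*
         * Compiler barrier: keep the index update above and the
         * ret_stack entry writes below in program order.
         */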
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = time;
        *depth = index;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
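        /*
         * Compiler barrier: make sure the entry has been fully read
         * before the index decrement below releases the slot.
         */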
        barrier();
        current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret);
        trace.rettime = cpu_clock(raw_smp_processor_id());
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

static int graph_trace_init(struct trace_array *tr)
{
        int cpu, ret;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        ret = register_ftrace_graph(&trace_graph_return,
                                        &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

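/* Number of decimal digits needed to print a cpu id (at most 3 here) */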
static inline int log10_cpu(int nb)
{
        if (nb / 100)
                return 3;
        if (nb / 10)
                return 2;
        return 1;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int i;
        int ret;
        int log10_this = log10_cpu(cpu);
        int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Tricky - we space the CPU field according to the max
         * number of online CPUs. On a 2-cpu system it would take
         * a maximum of 1 digit - on a 128 cpu system it would
         * take up to 3 digits:
         */
        for (i = 0; i < log10_all - log10_this; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        ret = trace_seq_printf(s, "%d) ", cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        int i;
        int ret;
        int len;
        char comm[8];
        int spaces = 0;
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];

        strncpy(comm, trace_find_cmdline(pid), 7);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
        pid_t prev_pid;
        int ret;

        if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = last_pid[cpu];
        last_pid[cpu] = pid;

        /*
         * Context-switch trace line:
         *
         *  ------------------------------------------
         *  | 1)  migration/0--1  =>  sshd-1755
         *  ------------------------------------------
         */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

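/*
 * A "leaf" is an entry event immediately followed in the ring buffer by
 * its own return event (same pid, same function): the call had no traced
 * children, so it can be printed on a single "func();" line.
 */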
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct ring_buffer_iter *ring_iter;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        ring_iter = iter->buffer_iter[iter->cpu];

        if (!ring_iter)
                return false;

        event = ring_buffer_iter_peek(ring_iter, NULL);

        if (!event)
                return false;

        next = ring_buffer_event_data(event);

        if (next->ent.type != TRACE_GRAPH_RET)
                return false;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return false;

        return true;
}

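/*
 * Print an interrupt marker when @addr lies in the irqentry text section:
 * "==========>" on irq entry, "<==========" on irq exit.
 */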
static enum print_line_t
print_graph_irq(struct trace_seq *s, unsigned long addr,
                                enum trace_type type, int cpu, pid_t pid)
{
        int ret;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        if (type == TRACE_GRAPH_ENT) {
                ret = trace_seq_printf(s, "==========> |  ");
        } else {
                /* Cpu */
                if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                        ret = print_graph_cpu(s, cpu);
                        if (ret == TRACE_TYPE_PARTIAL_LINE)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                /* Proc */
                if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                        ret = print_graph_proc(s, pid);
                        if (ret == TRACE_TYPE_PARTIAL_LINE)
                                return TRACE_TYPE_PARTIAL_LINE;

                        ret = trace_seq_printf(s, " | ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

                /* No overhead */
                if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                        ret = trace_seq_printf(s, "  ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

                ret = trace_seq_printf(s, "<========== |\n");
        }
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

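/*
 * Print @duration (in nanoseconds) as a fixed-width "1234.567 us |  "
 * column, limited to seven significant digits.
 */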
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

/* Signal an execution time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
        /* Duration exceeded 100 usecs */
        if (duration > 100000ULL)
                return trace_seq_printf(s, "! ");

        /* Duration exceeded 10 usecs */
        if (duration > 10000ULL)
                return trace_seq_printf(s, "+ ");

        return trace_seq_printf(s, "  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
        struct ftrace_graph_ret_entry *ret_entry;
        struct ftrace_graph_ret *graph_ret;
        struct ring_buffer_event *event;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
        ret_entry = ring_buffer_event_data(event);
        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        /* Overhead */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                ret = print_graph_overhead(duration, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Duration */
        ret = print_graph_duration(duration, s);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "();\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

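/* Case of a function entry that has traced children: open a "func() {" block */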
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
                        struct trace_seq *s, pid_t pid, int cpu)
{
        int i;
        int ret;
        struct ftrace_graph_ent *call = &entry->graph_ent;

        /* No overhead */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                ret = trace_seq_printf(s, "  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Interrupt */
        ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid);
        if (ret == TRACE_TYPE_UNHANDLED) {
                /* No time */
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = seq_print_ip_sym(s, call->func, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, "() {\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, int cpu)
{
        int ret;
        struct trace_entry *ent = iter->ent;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        if (trace_branch_is_leaf(iter, field))
                return print_graph_entry_leaf(iter, field, s);
        else
                return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, int cpu)
{
        int i;
        int ret;
        unsigned long long duration = trace->rettime - trace->calltime;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overhead */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                ret = print_graph_overhead(duration, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Duration */
        ret = print_graph_duration(duration, s);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "}\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overrun */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                        trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct print_entry *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter)
{
        int i;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Cpu */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, iter->cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                ret = trace_seq_printf(s, "  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No time */
        ret = trace_seq_printf(s, "            |  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Indentation */
        if (trace->depth > 0)
                for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* %s", trace->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (ent->flags & TRACE_FLAG_CONT)
                trace_seq_print_cont(s, iter);

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

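/* Entry point used by the tracing core: dispatch each event to the matching printer above */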
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                struct ftrace_graph_ent_entry *field;
                trace_assign_type(field, entry);
                return print_graph_entry(field, s, iter,
                                         iter->cpu);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter->cpu);
        }
        case TRACE_PRINT: {
                struct print_entry *field;
                trace_assign_type(field, entry);
                return print_graph_comment(field, s, entry, iter);
        }
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

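/* Print the two "#" header lines; the columns shown depend on the enabled funcgraph-* options */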
static void print_graph_headers(struct seq_file *s)
{
        /* 1st line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "CPU ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "TASK/PID     ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
                seq_printf(s, "OVERHEAD/");
        seq_printf(s, "DURATION            FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "# ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, "|   ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "|      |     ");
        if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
                seq_printf(s, "|        ");
                seq_printf(s, "|                   |   |   |   |\n");
        } else
                seq_printf(s, "    |               |   |   |   |\n");
}

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
};

static __init int init_graph_trace(void)
{
        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);