kernel/trace/trace_output.c
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE  128
16
17 DECLARE_RWSEM(trace_event_mutex);
18
19 DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
20 EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
21
22 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
23
24 static int next_event_type = __TRACE_LAST_TYPE + 1;
25
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 {
28         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
29
30         s->buffer[len] = 0;
31         seq_puts(m, s->buffer);
32
33         trace_seq_init(s);
34 }
35
36 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
37 {
38         struct trace_seq *s = &iter->seq;
39         struct trace_entry *entry = iter->ent;
40         struct bprint_entry *field;
41         int ret;
42
43         trace_assign_type(field, entry);
44
45         ret = trace_seq_bprintf(s, field->fmt, field->buf);
46         if (!ret)
47                 return TRACE_TYPE_PARTIAL_LINE;
48
49         return TRACE_TYPE_HANDLED;
50 }
51
52 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
53 {
54         struct trace_seq *s = &iter->seq;
55         struct trace_entry *entry = iter->ent;
56         struct print_entry *field;
57         int ret;
58
59         trace_assign_type(field, entry);
60
61         ret = trace_seq_printf(s, "%s", field->buf);
62         if (!ret)
63                 return TRACE_TYPE_PARTIAL_LINE;
64
65         return TRACE_TYPE_HANDLED;
66 }
67
68 /**
69  * trace_seq_printf - sequence printing of trace information
70  * @s: trace sequence descriptor
71  * @fmt: printf format string
72  *
73  * The tracer may use either sequence operations or its own
74  * copy to user routines. To simplify formatting of a trace,
75  * trace_seq_printf is used to store strings into a special
76  * buffer (@s). Then the output may be either used by
77  * the sequencer or pulled into another buffer.
78  */
79 int
80 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
81 {
82         int len = (PAGE_SIZE - 1) - s->len;
83         va_list ap;
84         int ret;
85
86         if (!len)
87                 return 0;
88
89         va_start(ap, fmt);
90         ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
91         va_end(ap);
92
93         /* If we can't write it all, don't bother writing anything */
94         if (ret >= len)
95                 return 0;
96
97         s->len += ret;
98
99         return len;
100 }
101 EXPORT_SYMBOL_GPL(trace_seq_printf);
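/*
 * Example (an illustrative sketch only; the "example" event name, message
 * and callback below are hypothetical): an output callback typically
 * builds its line with trace_seq_printf() and reports a full buffer as a
 * partial line:
 *
 *        static enum print_line_t example_trace(struct trace_iterator *iter,
 *                                               int flags)
 *        {
 *                struct trace_seq *s = &iter->seq;
 *
 *                if (!trace_seq_printf(s, "example: cpu=%d pid=%d\n",
 *                                      iter->cpu, iter->ent->pid))
 *                        return TRACE_TYPE_PARTIAL_LINE;
 *
 *                return TRACE_TYPE_HANDLED;
 *        }
 */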
102
103 /**
104  * trace_seq_vprintf - sequence printing of trace information
105  * @s: trace sequence descriptor
106  * @fmt: printf format string
107  *
108  * The tracer may use either sequence operations or its own
109  * copy to user routines. To simplify formatting of a trace,
110  * trace_seq_vprintf is used to store strings into a special
111  * buffer (@s). Then the output may be either used by
112  * the sequencer or pulled into another buffer.
113  */
114 int
115 trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
116 {
117         int len = (PAGE_SIZE - 1) - s->len;
118         int ret;
119
120         if (!len)
121                 return 0;
122
123         ret = vsnprintf(s->buffer + s->len, len, fmt, args);
124
125         /* If we can't write it all, don't bother writing anything */
126         if (ret >= len)
127                 return 0;
128
129         s->len += ret;
130
131         return len;
132 }
133 EXPORT_SYMBOL_GPL(trace_seq_vprintf);
134
135 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
136 {
137         int len = (PAGE_SIZE - 1) - s->len;
138         int ret;
139
140         if (!len)
141                 return 0;
142
143         ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
144
145         /* If we can't write it all, don't bother writing anything */
146         if (ret >= len)
147                 return 0;
148
149         s->len += ret;
150
151         return len;
152 }
153
154 /**
155  * trace_seq_puts - trace sequence printing of simple string
156  * @s: trace sequence descriptor
157  * @str: simple string to record
158  *
159  * The tracer may use either the sequence operations or its own
160  * copy to user routines. This function records a simple string
161  * into a special buffer (@s) for later retrieval by a sequencer
162  * or other mechanism.
163  */
164 int trace_seq_puts(struct trace_seq *s, const char *str)
165 {
166         int len = strlen(str);
167
168         if (len > ((PAGE_SIZE - 1) - s->len))
169                 return 0;
170
171         memcpy(s->buffer + s->len, str, len);
172         s->len += len;
173
174         return len;
175 }
176
177 int trace_seq_putc(struct trace_seq *s, unsigned char c)
178 {
179         if (s->len >= (PAGE_SIZE - 1))
180                 return 0;
181
182         s->buffer[s->len++] = c;
183
184         return 1;
185 }
186
187 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
188 {
189         if (len > ((PAGE_SIZE - 1) - s->len))
190                 return 0;
191
192         memcpy(s->buffer + s->len, mem, len);
193         s->len += len;
194
195         return len;
196 }
197
198 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
199 {
200         unsigned char hex[HEX_CHARS];
201         const unsigned char *data = mem;
202         int i, j;
203
204 #ifdef __BIG_ENDIAN
205         for (i = 0, j = 0; i < len; i++) {
206 #else
207         for (i = len-1, j = 0; i >= 0; i--) {
208 #endif
209                 hex[j++] = hex_asc_hi(data[i]);
210                 hex[j++] = hex_asc_lo(data[i]);
211         }
212         hex[j++] = ' ';
213
214         return trace_seq_putmem(s, hex, j);
215 }
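/*
 * Whatever the host byte order, the two loops above emit the bytes most
 * significant first, so a four byte value of 0x12345678 read from memory
 * comes out as the text "12345678 " (note the trailing space).
 */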
216
217 void *trace_seq_reserve(struct trace_seq *s, size_t len)
218 {
219         void *ret;
220
221         if (len > ((PAGE_SIZE - 1) - s->len))
222                 return NULL;
223
224         ret = s->buffer + s->len;
225         s->len += len;
226
227         return ret;
228 }
229
230 int trace_seq_path(struct trace_seq *s, struct path *path)
231 {
232         unsigned char *p;
233
234         if (s->len >= (PAGE_SIZE - 1))
235                 return 0;
236         p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
237         if (!IS_ERR(p)) {
238                 p = mangle_path(s->buffer + s->len, p, "\n");
239                 if (p) {
240                         s->len = p - s->buffer;
241                         return 1;
242                 }
243         } else {
244                 s->buffer[s->len++] = '?';
245                 return 1;
246         }
247
248         return 0;
249 }
250
251 const char *
252 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
253                        unsigned long flags,
254                        const struct trace_print_flags *flag_array)
255 {
256         unsigned long mask;
257         const char *str;
258         const char *ret = p->buffer + p->len;
259         int i;
260
261         for (i = 0;  flag_array[i].name && flags; i++) {
262
263                 mask = flag_array[i].mask;
264                 if ((flags & mask) != mask)
265                         continue;
266
267                 str = flag_array[i].name;
268                 flags &= ~mask;
269                 if (p->len && delim)
270                         trace_seq_puts(p, delim);
271                 trace_seq_puts(p, str);
272         }
273
274         /* check for left over flags */
275         if (flags) {
276                 if (p->len && delim)
277                         trace_seq_puts(p, delim);
278                 trace_seq_printf(p, "0x%lx", flags);
279         }
280
281         trace_seq_putc(p, 0);
282
283         return ret;
284 }
285 EXPORT_SYMBOL(ftrace_print_flags_seq);
286
287 const char *
288 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
289                          const struct trace_print_flags *symbol_array)
290 {
291         int i;
292         const char *ret = p->buffer + p->len;
293
294         for (i = 0;  symbol_array[i].name; i++) {
295
296                 if (val != symbol_array[i].mask)
297                         continue;
298
299                 trace_seq_puts(p, symbol_array[i].name);
300                 break;
301         }
302
303         if (!p->len)
304                 trace_seq_printf(p, "0x%lx", val);
305
306         trace_seq_putc(p, 0);
307
308         return ret;
309 }
310 EXPORT_SYMBOL(ftrace_print_symbols_seq);
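/*
 * Example (illustrative only; the masks and names below are made up): both
 * helpers walk a { mask, name } table terminated by a NULL name and return
 * a nul-terminated string built inside @p's buffer:
 *
 *        static const struct trace_print_flags example_flags[] = {
 *                { 0x1, "READ" },
 *                { 0x2, "WRITE" },
 *                { -1, NULL }
 *        };
 *        const char *str;
 *
 *        str = ftrace_print_flags_seq(p, "|", flags, example_flags);
 *
 * ftrace_print_flags_seq() joins every matching name with the delimiter
 * and appends any left-over bits in hex, while ftrace_print_symbols_seq()
 * prints the one name whose mask equals the value (or the value in hex if
 * none matches).
 */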
311
312 #ifdef CONFIG_KRETPROBES
313 static inline const char *kretprobed(const char *name)
314 {
315         static const char tramp_name[] = "kretprobe_trampoline";
316         int size = sizeof(tramp_name);
317
318         if (strncmp(tramp_name, name, size) == 0)
319                 return "[unknown/kretprobe'd]";
320         return name;
321 }
322 #else
323 static inline const char *kretprobed(const char *name)
324 {
325         return name;
326 }
327 #endif /* CONFIG_KRETPROBES */
328
329 static int
330 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
331 {
332 #ifdef CONFIG_KALLSYMS
333         char str[KSYM_SYMBOL_LEN];
334         const char *name;
335
336         kallsyms_lookup(address, NULL, NULL, NULL, str);
337
338         name = kretprobed(str);
339
340         return trace_seq_printf(s, fmt, name);
341 #endif
342         return 1;
343 }
344
345 static int
346 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
347                      unsigned long address)
348 {
349 #ifdef CONFIG_KALLSYMS
350         char str[KSYM_SYMBOL_LEN];
351         const char *name;
352
353         sprint_symbol(str, address);
354         name = kretprobed(str);
355
356         return trace_seq_printf(s, fmt, name);
357 #endif
358         return 1;
359 }
360
361 #ifndef CONFIG_64BIT
362 # define IP_FMT "%08lx"
363 #else
364 # define IP_FMT "%016lx"
365 #endif
366
367 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
368                       unsigned long ip, unsigned long sym_flags)
369 {
370         struct file *file = NULL;
371         unsigned long vmstart = 0;
372         int ret = 1;
373
374         if (mm) {
375                 const struct vm_area_struct *vma;
376
377                 down_read(&mm->mmap_sem);
378                 vma = find_vma(mm, ip);
379                 if (vma) {
380                         file = vma->vm_file;
381                         vmstart = vma->vm_start;
382                 }
383                 if (file) {
384                         ret = trace_seq_path(s, &file->f_path);
385                         if (ret)
386                                 ret = trace_seq_printf(s, "[+0x%lx]",
387                                                        ip - vmstart);
388                 }
389                 up_read(&mm->mmap_sem);
390         }
391         if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
392                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
393         return ret;
394 }
395
396 int
397 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
398                       unsigned long sym_flags)
399 {
400         struct mm_struct *mm = NULL;
401         int ret = 1;
402         unsigned int i;
403
404         if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
405                 struct task_struct *task;
406                 /*
407                  * we do the lookup on the thread group leader,
408                  * since individual threads might have already quit!
409                  */
410                 rcu_read_lock();
411                 task = find_task_by_vpid(entry->ent.tgid);
412                 if (task)
413                         mm = get_task_mm(task);
414                 rcu_read_unlock();
415         }
416
417         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
418                 unsigned long ip = entry->caller[i];
419
420                 if (ip == ULONG_MAX || !ret)
421                         break;
422                 if (ret)
423                         ret = trace_seq_puts(s, " => ");
424                 if (!ip) {
425                         if (ret)
426                                 ret = trace_seq_puts(s, "??");
427                         if (ret)
428                                 ret = trace_seq_puts(s, "\n");
429                         continue;
430                 }
431                 if (!ret)
432                         break;
433                 if (ret)
434                         ret = seq_print_user_ip(s, mm, ip, sym_flags);
435                 ret = trace_seq_puts(s, "\n");
436         }
437
438         if (mm)
439                 mmput(mm);
440         return ret;
441 }
442
443 int
444 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
445 {
446         int ret;
447
448         if (!ip)
449                 return trace_seq_printf(s, "0");
450
451         if (sym_flags & TRACE_ITER_SYM_OFFSET)
452                 ret = seq_print_sym_offset(s, "%s", ip);
453         else
454                 ret = seq_print_sym_short(s, "%s", ip);
455
456         if (!ret)
457                 return 0;
458
459         if (sym_flags & TRACE_ITER_SYM_ADDR)
460                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
461         return ret;
462 }
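/*
 * The sym_flags bits only add detail: TRACE_ITER_SYM_OFFSET prints the
 * symbol with its offset and size (e.g. "schedule+0x1c/0x40", values
 * illustrative), and TRACE_ITER_SYM_ADDR appends the raw address in
 * angle brackets.
 */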
463
464 static int
465 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
466 {
467         int hardirq, softirq;
468         char comm[TASK_COMM_LEN];
469
470         trace_find_cmdline(entry->pid, comm);
471         hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
472         softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
473
474         if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
475                               comm, entry->pid, cpu,
476                               (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
477                                 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
478                                   'X' : '.',
479                               (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
480                                 'N' : '.',
481                               (hardirq && softirq) ? 'H' :
482                                 hardirq ? 'h' : softirq ? 's' : '.'))
483                 return 0;
484
485         if (entry->preempt_count)
486                 return trace_seq_printf(s, "%x", entry->preempt_count);
487         return trace_seq_puts(s, ".");
488 }
489
490 static unsigned long preempt_mark_thresh = 100;
491
492 static int
493 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
494                     unsigned long rel_usecs)
495 {
496         return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
497                                 rel_usecs > preempt_mark_thresh ? '!' :
498                                   rel_usecs > 1 ? '+' : ' ');
499 }
500
501 int trace_print_context(struct trace_iterator *iter)
502 {
503         struct trace_seq *s = &iter->seq;
504         struct trace_entry *entry = iter->ent;
505         unsigned long long t = ns2usecs(iter->ts);
506         unsigned long usec_rem = do_div(t, USEC_PER_SEC);
507         unsigned long secs = (unsigned long)t;
508         char comm[TASK_COMM_LEN];
509
510         trace_find_cmdline(entry->pid, comm);
511
512         return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
513                                 comm, entry->pid, iter->cpu, secs, usec_rem);
514 }
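/*
 * With the format above, the context prefix of a line comes out roughly
 * as (values illustrative only):
 *
 *                    bash-2153  [001]  1618.123456:
 */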
515
516 int trace_print_lat_context(struct trace_iterator *iter)
517 {
518         u64 next_ts;
519         int ret;
520         struct trace_seq *s = &iter->seq;
521         struct trace_entry *entry = iter->ent,
522                            *next_entry = trace_find_next_entry(iter, NULL,
523                                                                &next_ts);
524         unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
525         unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
526         unsigned long rel_usecs;
527
528         if (!next_entry)
529                 next_ts = iter->ts;
530         rel_usecs = ns2usecs(next_ts - iter->ts);
531
532         if (verbose) {
533                 char comm[TASK_COMM_LEN];
534
535                 trace_find_cmdline(entry->pid, comm);
536
537                 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
538                                        " %ld.%03ldms (+%ld.%03ldms): ", comm,
539                                        entry->pid, iter->cpu, entry->flags,
540                                        entry->preempt_count, iter->idx,
541                                        ns2usecs(iter->ts),
542                                        abs_usecs / USEC_PER_MSEC,
543                                        abs_usecs % USEC_PER_MSEC,
544                                        rel_usecs / USEC_PER_MSEC,
545                                        rel_usecs % USEC_PER_MSEC);
546         } else {
547                 ret = lat_print_generic(s, entry, iter->cpu);
548                 if (ret)
549                         ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
550         }
551
552         return ret;
553 }
554
555 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
556
557 static int task_state_char(unsigned long state)
558 {
559         int bit = state ? __ffs(state) + 1 : 0;
560
561         return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
562 }
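/*
 * For example, a running task (state 0) maps to the first character of
 * TASK_STATE_TO_CHAR_STR ('R') and TASK_INTERRUPTIBLE (bit 0) to the
 * second ('S'); a state bit beyond the end of the string falls back to '?'.
 */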
563
564 /**
565  * ftrace_find_event - find a registered event
566  * @type: the type of event to look for
567  *
568  * Returns an event of type @type, otherwise NULL.
569  * Called with trace_event_read_lock() held.
570  */
571 struct trace_event *ftrace_find_event(int type)
572 {
573         struct trace_event *event;
574         struct hlist_node *n;
575         unsigned key;
576
577         key = type & (EVENT_HASHSIZE - 1);
578
579         hlist_for_each_entry(event, n, &event_hash[key], node) {
580                 if (event->type == type)
581                         return event;
582         }
583
584         return NULL;
585 }
586
587 static LIST_HEAD(ftrace_event_list);
588
589 static int trace_search_list(struct list_head **list)
590 {
591         struct trace_event *e;
592         int last = __TRACE_LAST_TYPE;
593
594         if (list_empty(&ftrace_event_list)) {
595                 *list = &ftrace_event_list;
596                 return last + 1;
597         }
598
599         /*
600          * We used up all possible max events,
601          * let's see if somebody freed one.
602          */
603         list_for_each_entry(e, &ftrace_event_list, list) {
604                 if (e->type != last + 1)
605                         break;
606                 last++;
607         }
608
609         /* Did we use up all 65 thousand events??? */
610         if ((last + 1) > FTRACE_MAX_EVENT)
611                 return 0;
612
613         *list = &e->list;
614         return last + 1;
615 }
616
617 void trace_event_read_lock(void)
618 {
619         down_read(&trace_event_mutex);
620 }
621
622 void trace_event_read_unlock(void)
623 {
624         up_read(&trace_event_mutex);
625 }
626
627 /**
628  * register_ftrace_event - register output for an event type
629  * @event: the event type to register
630  *
631  * Event types are stored in a hash and this hash is used to
632  * find a way to print an event. If the @event->type is set
633  * then it will use that type, otherwise it will assign a
634  * type to use.
635  *
636  * If you assign your own type, please make sure it is added
637  * to the trace_type enum in trace.h, to avoid collisions
638  * with the dynamic types.
639  *
640  * Returns the event type number or zero on error.
641  */
642 int register_ftrace_event(struct trace_event *event)
643 {
644         unsigned key;
645         int ret = 0;
646
647         down_write(&trace_event_mutex);
648
649         if (WARN_ON(!event))
650                 goto out;
651
652         INIT_LIST_HEAD(&event->list);
653
654         if (!event->type) {
655                 struct list_head *list = NULL;
656
657                 if (next_event_type > FTRACE_MAX_EVENT) {
658
659                         event->type = trace_search_list(&list);
660                         if (!event->type)
661                                 goto out;
662
663                 } else {
664
665                         event->type = next_event_type++;
666                         list = &ftrace_event_list;
667                 }
668
669                 if (WARN_ON(ftrace_find_event(event->type)))
670                         goto out;
671
672                 list_add_tail(&event->list, list);
673
674         } else if (event->type > __TRACE_LAST_TYPE) {
675                 printk(KERN_WARNING "Need to add type to trace.h\n");
676                 WARN_ON(1);
677                 goto out;
678         } else {
679                 /* Is this event already used */
680                 if (ftrace_find_event(event->type))
681                         goto out;
682         }
683
684         if (event->trace == NULL)
685                 event->trace = trace_nop_print;
686         if (event->raw == NULL)
687                 event->raw = trace_nop_print;
688         if (event->hex == NULL)
689                 event->hex = trace_nop_print;
690         if (event->binary == NULL)
691                 event->binary = trace_nop_print;
692
693         key = event->type & (EVENT_HASHSIZE - 1);
694
695         hlist_add_head(&event->node, &event_hash[key]);
696
697         ret = event->type;
698  out:
699         up_write(&trace_event_mutex);
700
701         return ret;
702 }
703 EXPORT_SYMBOL_GPL(register_ftrace_event);
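/*
 * Example (illustrative only; the event and callback names are made up):
 * a tracer that emits its own entry type registers how it is printed,
 * e.g. reusing the example_trace() sketch shown near trace_seq_printf():
 *
 *        static struct trace_event example_event = {
 *                .type   = 0,
 *                .trace  = example_trace,
 *        };
 *
 *        if (!register_ftrace_event(&example_event))
 *                printk(KERN_WARNING "could not register example event\n");
 *
 * A zero .type asks for the next free dynamic type number, callbacks left
 * NULL fall back to trace_nop_print(), and the event is removed again with
 * unregister_ftrace_event().
 */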
704
705 /*
706  * Used by module code with the trace_event_mutex held for write.
707  */
708 int __unregister_ftrace_event(struct trace_event *event)
709 {
710         hlist_del(&event->node);
711         list_del(&event->list);
712         return 0;
713 }
714
715 /**
716  * unregister_ftrace_event - remove a no longer used event
717  * @event: the event to remove
718  */
719 int unregister_ftrace_event(struct trace_event *event)
720 {
721         down_write(&trace_event_mutex);
722         __unregister_ftrace_event(event);
723         up_write(&trace_event_mutex);
724
725         return 0;
726 }
727 EXPORT_SYMBOL_GPL(unregister_ftrace_event);
728
729 /*
730  * Standard events
731  */
732
733 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
734 {
735         return TRACE_TYPE_HANDLED;
736 }
737
738 /* TRACE_FN */
739 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
740 {
741         struct ftrace_entry *field;
742         struct trace_seq *s = &iter->seq;
743
744         trace_assign_type(field, iter->ent);
745
746         if (!seq_print_ip_sym(s, field->ip, flags))
747                 goto partial;
748
749         if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
750                 if (!trace_seq_printf(s, " <-"))
751                         goto partial;
752                 if (!seq_print_ip_sym(s,
753                                       field->parent_ip,
754                                       flags))
755                         goto partial;
756         }
757         if (!trace_seq_printf(s, "\n"))
758                 goto partial;
759
760         return TRACE_TYPE_HANDLED;
761
762  partial:
763         return TRACE_TYPE_PARTIAL_LINE;
764 }
765
766 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
767 {
768         struct ftrace_entry *field;
769
770         trace_assign_type(field, iter->ent);
771
772         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
773                               field->ip,
774                               field->parent_ip))
775                 return TRACE_TYPE_PARTIAL_LINE;
776
777         return TRACE_TYPE_HANDLED;
778 }
779
780 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
781 {
782         struct ftrace_entry *field;
783         struct trace_seq *s = &iter->seq;
784
785         trace_assign_type(field, iter->ent);
786
787         SEQ_PUT_HEX_FIELD_RET(s, field->ip);
788         SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
789
790         return TRACE_TYPE_HANDLED;
791 }
792
793 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
794 {
795         struct ftrace_entry *field;
796         struct trace_seq *s = &iter->seq;
797
798         trace_assign_type(field, iter->ent);
799
800         SEQ_PUT_FIELD_RET(s, field->ip);
801         SEQ_PUT_FIELD_RET(s, field->parent_ip);
802
803         return TRACE_TYPE_HANDLED;
804 }
805
806 static struct trace_event trace_fn_event = {
807         .type           = TRACE_FN,
808         .trace          = trace_fn_trace,
809         .raw            = trace_fn_raw,
810         .hex            = trace_fn_hex,
811         .binary         = trace_fn_bin,
812 };
813
814 /* TRACE_CTX and TRACE_WAKE */
815 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
816                                              char *delim)
817 {
818         struct ctx_switch_entry *field;
819         char comm[TASK_COMM_LEN];
820         int S, T;
821
822
823         trace_assign_type(field, iter->ent);
824
825         T = task_state_char(field->next_state);
826         S = task_state_char(field->prev_state);
827         trace_find_cmdline(field->next_pid, comm);
828         if (!trace_seq_printf(&iter->seq,
829                               " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
830                               field->prev_pid,
831                               field->prev_prio,
832                               S, delim,
833                               field->next_cpu,
834                               field->next_pid,
835                               field->next_prio,
836                               T, comm))
837                 return TRACE_TYPE_PARTIAL_LINE;
838
839         return TRACE_TYPE_HANDLED;
840 }
841
842 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
843 {
844         return trace_ctxwake_print(iter, "==>");
845 }
846
847 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
848                                           int flags)
849 {
850         return trace_ctxwake_print(iter, "  +");
851 }
852
853 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
854 {
855         struct ctx_switch_entry *field;
856         int T;
857
858         trace_assign_type(field, iter->ent);
859
860         if (!S)
861                 S = task_state_char(field->prev_state);
862         T = task_state_char(field->next_state);
863         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
864                               field->prev_pid,
865                               field->prev_prio,
866                               S,
867                               field->next_cpu,
868                               field->next_pid,
869                               field->next_prio,
870                               T))
871                 return TRACE_TYPE_PARTIAL_LINE;
872
873         return TRACE_TYPE_HANDLED;
874 }
875
876 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
877 {
878         return trace_ctxwake_raw(iter, 0);
879 }
880
881 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
882 {
883         return trace_ctxwake_raw(iter, '+');
884 }
885
886
887 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
888 {
889         struct ctx_switch_entry *field;
890         struct trace_seq *s = &iter->seq;
891         int T;
892
893         trace_assign_type(field, iter->ent);
894
895         if (!S)
896                 S = task_state_char(field->prev_state);
897         T = task_state_char(field->next_state);
898
899         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
900         SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
901         SEQ_PUT_HEX_FIELD_RET(s, S);
902         SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
903         SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
904         SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
905         SEQ_PUT_HEX_FIELD_RET(s, T);
906
907         return TRACE_TYPE_HANDLED;
908 }
909
910 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
911 {
912         return trace_ctxwake_hex(iter, 0);
913 }
914
915 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
916 {
917         return trace_ctxwake_hex(iter, '+');
918 }
919
920 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
921                                            int flags)
922 {
923         struct ctx_switch_entry *field;
924         struct trace_seq *s = &iter->seq;
925
926         trace_assign_type(field, iter->ent);
927
928         SEQ_PUT_FIELD_RET(s, field->prev_pid);
929         SEQ_PUT_FIELD_RET(s, field->prev_prio);
930         SEQ_PUT_FIELD_RET(s, field->prev_state);
931         SEQ_PUT_FIELD_RET(s, field->next_pid);
932         SEQ_PUT_FIELD_RET(s, field->next_prio);
933         SEQ_PUT_FIELD_RET(s, field->next_state);
934
935         return TRACE_TYPE_HANDLED;
936 }
937
938 static struct trace_event trace_ctx_event = {
939         .type           = TRACE_CTX,
940         .trace          = trace_ctx_print,
941         .raw            = trace_ctx_raw,
942         .hex            = trace_ctx_hex,
943         .binary         = trace_ctxwake_bin,
944 };
945
946 static struct trace_event trace_wake_event = {
947         .type           = TRACE_WAKE,
948         .trace          = trace_wake_print,
949         .raw            = trace_wake_raw,
950         .hex            = trace_wake_hex,
951         .binary         = trace_ctxwake_bin,
952 };
953
954 /* TRACE_SPECIAL */
955 static enum print_line_t trace_special_print(struct trace_iterator *iter,
956                                              int flags)
957 {
958         struct special_entry *field;
959
960         trace_assign_type(field, iter->ent);
961
962         if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
963                               field->arg1,
964                               field->arg2,
965                               field->arg3))
966                 return TRACE_TYPE_PARTIAL_LINE;
967
968         return TRACE_TYPE_HANDLED;
969 }
970
971 static enum print_line_t trace_special_hex(struct trace_iterator *iter,
972                                            int flags)
973 {
974         struct special_entry *field;
975         struct trace_seq *s = &iter->seq;
976
977         trace_assign_type(field, iter->ent);
978
979         SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
980         SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
981         SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
982
983         return TRACE_TYPE_HANDLED;
984 }
985
986 static enum print_line_t trace_special_bin(struct trace_iterator *iter,
987                                            int flags)
988 {
989         struct special_entry *field;
990         struct trace_seq *s = &iter->seq;
991
992         trace_assign_type(field, iter->ent);
993
994         SEQ_PUT_FIELD_RET(s, field->arg1);
995         SEQ_PUT_FIELD_RET(s, field->arg2);
996         SEQ_PUT_FIELD_RET(s, field->arg3);
997
998         return TRACE_TYPE_HANDLED;
999 }
1000
1001 static struct trace_event trace_special_event = {
1002         .type           = TRACE_SPECIAL,
1003         .trace          = trace_special_print,
1004         .raw            = trace_special_print,
1005         .hex            = trace_special_hex,
1006         .binary         = trace_special_bin,
1007 };
1008
1009 /* TRACE_STACK */
1010
1011 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
1012                                            int flags)
1013 {
1014         struct stack_entry *field;
1015         struct trace_seq *s = &iter->seq;
1016         int i;
1017
1018         trace_assign_type(field, iter->ent);
1019
1020         if (!trace_seq_puts(s, "<stack trace>\n"))
1021                 goto partial;
1022         for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1023                 if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
1024                         break;
1025                 if (!trace_seq_puts(s, " => "))
1026                         goto partial;
1027
1028                 if (!seq_print_ip_sym(s, field->caller[i], flags))
1029                         goto partial;
1030                 if (!trace_seq_puts(s, "\n"))
1031                         goto partial;
1032         }
1033
1034         return TRACE_TYPE_HANDLED;
1035
1036  partial:
1037         return TRACE_TYPE_PARTIAL_LINE;
1038 }
1039
1040 static struct trace_event trace_stack_event = {
1041         .type           = TRACE_STACK,
1042         .trace          = trace_stack_print,
1043         .raw            = trace_special_print,
1044         .hex            = trace_special_hex,
1045         .binary         = trace_special_bin,
1046 };
1047
1048 /* TRACE_USER_STACK */
1049 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1050                                                 int flags)
1051 {
1052         struct userstack_entry *field;
1053         struct trace_seq *s = &iter->seq;
1054
1055         trace_assign_type(field, iter->ent);
1056
1057         if (!trace_seq_puts(s, "<user stack trace>\n"))
1058                 goto partial;
1059
1060         if (!seq_print_userip_objs(field, s, flags))
1061                 goto partial;
1062
1063         return TRACE_TYPE_HANDLED;
1064
1065  partial:
1066         return TRACE_TYPE_PARTIAL_LINE;
1067 }
1068
1069 static struct trace_event trace_user_stack_event = {
1070         .type           = TRACE_USER_STACK,
1071         .trace          = trace_user_stack_print,
1072         .raw            = trace_special_print,
1073         .hex            = trace_special_hex,
1074         .binary         = trace_special_bin,
1075 };
1076
1077 /* TRACE_BPRINT */
1078 static enum print_line_t
1079 trace_bprint_print(struct trace_iterator *iter, int flags)
1080 {
1081         struct trace_entry *entry = iter->ent;
1082         struct trace_seq *s = &iter->seq;
1083         struct bprint_entry *field;
1084
1085         trace_assign_type(field, entry);
1086
1087         if (!seq_print_ip_sym(s, field->ip, flags))
1088                 goto partial;
1089
1090         if (!trace_seq_puts(s, ": "))
1091                 goto partial;
1092
1093         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1094                 goto partial;
1095
1096         return TRACE_TYPE_HANDLED;
1097
1098  partial:
1099         return TRACE_TYPE_PARTIAL_LINE;
1100 }
1101
1102
1103 static enum print_line_t
1104 trace_bprint_raw(struct trace_iterator *iter, int flags)
1105 {
1106         struct bprint_entry *field;
1107         struct trace_seq *s = &iter->seq;
1108
1109         trace_assign_type(field, iter->ent);
1110
1111         if (!trace_seq_printf(s, ": %lx : ", field->ip))
1112                 goto partial;
1113
1114         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1115                 goto partial;
1116
1117         return TRACE_TYPE_HANDLED;
1118
1119  partial:
1120         return TRACE_TYPE_PARTIAL_LINE;
1121 }
1122
1123
1124 static struct trace_event trace_bprint_event = {
1125         .type           = TRACE_BPRINT,
1126         .trace          = trace_bprint_print,
1127         .raw            = trace_bprint_raw,
1128 };
1129
1130 /* TRACE_PRINT */
1131 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1132                                            int flags)
1133 {
1134         struct print_entry *field;
1135         struct trace_seq *s = &iter->seq;
1136
1137         trace_assign_type(field, iter->ent);
1138
1139         if (!seq_print_ip_sym(s, field->ip, flags))
1140                 goto partial;
1141
1142         if (!trace_seq_printf(s, ": %s", field->buf))
1143                 goto partial;
1144
1145         return TRACE_TYPE_HANDLED;
1146
1147  partial:
1148         return TRACE_TYPE_PARTIAL_LINE;
1149 }
1150
1151 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1152 {
1153         struct print_entry *field;
1154
1155         trace_assign_type(field, iter->ent);
1156
1157         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1158                 goto partial;
1159
1160         return TRACE_TYPE_HANDLED;
1161
1162  partial:
1163         return TRACE_TYPE_PARTIAL_LINE;
1164 }
1165
1166 static struct trace_event trace_print_event = {
1167         .type           = TRACE_PRINT,
1168         .trace          = trace_print_print,
1169         .raw            = trace_print_raw,
1170 };
1171
1172
1173 static struct trace_event *events[] __initdata = {
1174         &trace_fn_event,
1175         &trace_ctx_event,
1176         &trace_wake_event,
1177         &trace_special_event,
1178         &trace_stack_event,
1179         &trace_user_stack_event,
1180         &trace_bprint_event,
1181         &trace_print_event,
1182         NULL
1183 };
1184
1185 __init static int init_events(void)
1186 {
1187         struct trace_event *event;
1188         int i, ret;
1189
1190         for (i = 0; events[i]; i++) {
1191                 event = events[i];
1192
1193                 ret = register_ftrace_event(event);
1194                 if (!ret) {
1195                         printk(KERN_WARNING "event %d failed to register\n",
1196                                event->type);
1197                         WARN_ON_ONCE(1);
1198                 }
1199         }
1200
1201         return 0;
1202 }
1203 device_initcall(init_events);