tracing/events: fix output format of user stack
kernel/trace/trace_output.c
/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE  128

static DECLARE_RWSEM(trace_event_mutex);

DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

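/*
 * Copy the contents of a trace_seq buffer into the given seq_file,
 * NUL-terminating it at the end of the buffer if necessary, and then
 * reset the trace_seq so it can be reused.
 */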
void trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_init(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct bprint_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_bprintf(s, field->fmt, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_printf(s, "%s", field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may either be used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);

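/*
 * Like trace_seq_printf(), but the arguments come as a block of
 * pre-decoded binary data (@binary) and are expanded against @fmt
 * with bstr_printf().
 */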
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
        int len = (PAGE_SIZE - 1) - s->len;
        int ret;

        if (!len)
                return 0;

        ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

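/*
 * Write @len bytes of @mem into the sequence as ASCII hex digits,
 * most significant byte first, followed by a trailing space.  The
 * direction of the byte walk depends on endianness so the output
 * reads as a natural value.
 */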
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        const unsigned char *data = mem;
        int i, j;

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                hex[j++] = hex_asc_hi(data[i]);
                hex[j++] = hex_asc_lo(data[i]);
        }
        hex[j++] = ' ';

        return trace_seq_putmem(s, hex, j);
}

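/*
 * Reserve @len bytes in the sequence buffer and return a pointer to
 * the reserved region for the caller to fill in directly, or NULL if
 * there is not enough room left.
 */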
void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
        void *ret;

        if (len > ((PAGE_SIZE - 1) - s->len))
                return NULL;

        ret = s->buffer + s->len;
        s->len += len;

        return ret;
}

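/*
 * Print the pathname of @path into the sequence, escaping any newlines
 * with mangle_path().  A '?' is emitted instead if d_path() fails.
 */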
int trace_seq_path(struct trace_seq *s, struct path *path)
{
        unsigned char *p;

        if (s->len >= (PAGE_SIZE - 1))
                return 0;
        p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
        if (!IS_ERR(p)) {
                p = mangle_path(s->buffer + s->len, p, "\n");
                if (p) {
                        s->len = p - s->buffer;
                        return 1;
                }
        } else {
                s->buffer[s->len++] = '?';
                return 1;
        }

        return 0;
}

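/*
 * Decode a flags word into a @delim-separated list of the names given
 * in @flag_array.  Any bits that have no name are appended at the end
 * in hex.  The NUL-terminated result is built in @p and returned.
 */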
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                       unsigned long flags,
                       const struct trace_print_flags *flag_array)
{
        unsigned long mask;
        const char *str;
        int i;

        trace_seq_init(p);

        for (i = 0;  flag_array[i].name && flags; i++) {

                mask = flag_array[i].mask;
                if ((flags & mask) != mask)
                        continue;

                str = flag_array[i].name;
                flags &= ~mask;
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_puts(p, str);
        }

        /* check for leftover flags */
        if (flags) {
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_printf(p, "0x%lx", flags);
        }

        trace_seq_putc(p, 0);

        return p->buffer;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

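/*
 * Translate a scalar @val into its symbolic name from @symbol_array,
 * falling back to printing the value in hex when no entry matches.
 */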
const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                         const struct trace_print_flags *symbol_array)
{
        int i;

        trace_seq_init(p);

        for (i = 0;  symbol_array[i].name; i++) {

                if (val != symbol_array[i].mask)
                        continue;

                trace_seq_puts(p, symbol_array[i].name);
                break;
        }

        if (!p->len)
                trace_seq_printf(p, "0x%lx", val);

        trace_seq_putc(p, 0);

        return p->buffer;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";
        int size = sizeof(tramp_name);

        if (strncmp(tramp_name, name, size) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}
#else
static inline const char *kretprobed(const char *name)
{
        return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        sprint_symbol(str, address);
        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

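/*
 * Print a userspace instruction pointer.  If @mm is available and the
 * address falls inside a file-backed mapping, print the file's path
 * and the offset into that mapping; the raw address is appended when
 * TRACE_ITER_SYM_ADDR is set or no backing file was found.
 */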
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
                      unsigned long ip, unsigned long sym_flags)
{
        struct file *file = NULL;
        unsigned long vmstart = 0;
        int ret = 1;

        if (mm) {
                const struct vm_area_struct *vma;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ip);
                if (vma) {
                        file = vma->vm_file;
                        vmstart = vma->vm_start;
                }
                if (file) {
                        ret = trace_seq_path(s, &file->f_path);
                        if (ret)
                                ret = trace_seq_printf(s, "[+0x%lx]",
                                                       ip - vmstart);
                }
                up_read(&mm->mmap_sem);
        }
        if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

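/*
 * Print every entry of a recorded user stack trace, one " => " line
 * per address.  When TRACE_ITER_SYM_USEROBJ is set, the mm of the
 * traced task is looked up via the recorded tgid so addresses can be
 * resolved to file + offset.
 */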
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
                      unsigned long sym_flags)
{
        struct mm_struct *mm = NULL;
        int ret = 1;
        unsigned int i;

        if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
                struct task_struct *task;
                /*
                 * we do the lookup on the thread group leader,
                 * since individual threads might have already quit!
                 */
                rcu_read_lock();
                task = find_task_by_vpid(entry->ent.tgid);
                if (task)
                        mm = get_task_mm(task);
                rcu_read_unlock();
        }

        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                unsigned long ip = entry->caller[i];

                if (ip == ULONG_MAX || !ret)
                        break;
                if (ret)
                        ret = trace_seq_puts(s, " => ");
                if (!ip) {
                        if (ret)
                                ret = trace_seq_puts(s, "??");
                        if (ret)
                                ret = trace_seq_puts(s, "\n");
                        continue;
                }
                if (!ret)
                        break;
                if (ret)
                        ret = seq_print_user_ip(s, mm, ip, sym_flags);
                ret = trace_seq_puts(s, "\n");
        }

        if (mm)
                mmput(mm);
        return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

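/*
 * Print the latency-format prefix for an entry: comm-pid, CPU, the
 * irqs-off/need-resched/hardirq-softirq flag characters and the
 * preempt count (or '.' when it is zero).
 */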
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

        if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
                              comm, entry->pid, cpu,
                              (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                                (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
                                  'X' : '.',
                              (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
                                'N' : '.',
                              (hardirq && softirq) ? 'H' :
                                hardirq ? 'h' : softirq ? 's' : '.'))
                return 0;

        if (entry->preempt_count)
                return trace_seq_printf(s, "%x", entry->preempt_count);
        return trace_seq_puts(s, ".");
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
                    unsigned long rel_usecs)
{
        return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
                                rel_usecs > preempt_mark_thresh ? '!' :
                                  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned long secs = (unsigned long)t;
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);

        return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
                                comm, entry->pid, iter->cpu, secs, usec_rem);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
        u64 next_ts;
        int ret;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent,
                           *next_entry = trace_find_next_entry(iter, NULL,
                                                               &next_ts);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
        unsigned long rel_usecs;

        if (!next_entry)
                next_ts = iter->ts;
        rel_usecs = ns2usecs(next_ts - iter->ts);

        if (verbose) {
                char comm[TASK_COMM_LEN];

                trace_find_cmdline(entry->pid, comm);

                ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
                                       " %ld.%03ldms (+%ld.%03ldms): ", comm,
                                       entry->pid, iter->cpu, entry->flags,
                                       entry->preempt_count, iter->idx,
                                       ns2usecs(iter->ts),
                                       abs_usecs / USEC_PER_MSEC,
                                       abs_usecs % USEC_PER_MSEC,
                                       rel_usecs / USEC_PER_MSEC,
                                       rel_usecs % USEC_PER_MSEC);
        } else {
                ret = lat_print_generic(s, entry, iter->cpu);
                if (ret)
                        ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
        }

        return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

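/*
 * Map a task state bitmask to its single-character representation in
 * TASK_STATE_TO_CHAR_STR ('R' for running); states beyond the string
 * map to '?'.
 */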
static int task_state_char(unsigned long state)
{
        int bit = state ? __ffs(state) + 1 : 0;

        return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
        struct trace_event *event;
        struct hlist_node *n;
        unsigned key;

        key = type & (EVENT_HASHSIZE - 1);

        hlist_for_each_entry(event, n, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }

        return NULL;
}

static LIST_HEAD(ftrace_event_list);

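/*
 * Search the ordered list of dynamically registered events for a free
 * type number.  On success the free type is returned and *list is set
 * to the insertion point for the new event; 0 is returned when every
 * dynamic type number is already taken.
 */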
static int trace_search_list(struct list_head **list)
{
        struct trace_event *e;
        int last = __TRACE_LAST_TYPE;

        if (list_empty(&ftrace_event_list)) {
                *list = &ftrace_event_list;
                return last + 1;
        }

        /*
         * We've used up all possible max events,
         * let's see if somebody freed one.
         */
        list_for_each_entry(e, &ftrace_event_list, list) {
                if (e->type != last + 1)
                        break;
                last++;
        }

        /* Did we use up all 65 thousand events??? */
        if ((last + 1) > FTRACE_MAX_EVENT)
                return 0;

        *list = &e->list;
        return last + 1;
}

void trace_event_read_lock(void)
{
        down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
        up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
        unsigned key;
        int ret = 0;

        down_write(&trace_event_mutex);

        if (WARN_ON(!event))
                goto out;

        INIT_LIST_HEAD(&event->list);

        if (!event->type) {
                struct list_head *list = NULL;

                if (next_event_type > FTRACE_MAX_EVENT) {

                        event->type = trace_search_list(&list);
                        if (!event->type)
                                goto out;

                } else {

                        event->type = next_event_type++;
                        list = &ftrace_event_list;
                }

                if (WARN_ON(ftrace_find_event(event->type)))
                        goto out;

                list_add_tail(&event->list, list);

        } else if (event->type > __TRACE_LAST_TYPE) {
                printk(KERN_WARNING "Need to add type to trace.h\n");
                WARN_ON(1);
                goto out;
        } else {
                /* Is this event already used */
                if (ftrace_find_event(event->type))
                        goto out;
        }

        if (event->trace == NULL)
                event->trace = trace_nop_print;
        if (event->raw == NULL)
                event->raw = trace_nop_print;
        if (event->hex == NULL)
                event->hex = trace_nop_print;
        if (event->binary == NULL)
                event->binary = trace_nop_print;

        key = event->type & (EVENT_HASHSIZE - 1);

        hlist_add_head(&event->node, &event_hash[key]);

        ret = event->type;
 out:
        up_write(&trace_event_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
        down_write(&trace_event_mutex);
        hlist_del(&event->node);
        list_del(&event->list);
        up_write(&trace_event_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
        return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
                if (!trace_seq_printf(s, " <-"))
                        goto partial;
                if (!seq_print_ip_sym(s,
                                      field->parent_ip,
                                      flags))
                        goto partial;
        }
        if (!trace_seq_printf(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
                              field->ip,
                              field->parent_ip))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->ip);
        SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->ip);
        SEQ_PUT_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
        .type           = TRACE_FN,
        .trace          = trace_fn_trace,
        .raw            = trace_fn_raw,
        .hex            = trace_fn_hex,
        .binary         = trace_fn_bin,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
{
        struct ctx_switch_entry *field;
        char comm[TASK_COMM_LEN];
        int S, T;

        trace_assign_type(field, iter->ent);

        T = task_state_char(field->next_state);
        S = task_state_char(field->prev_state);
        trace_find_cmdline(field->next_pid, comm);
        if (!trace_seq_printf(&iter->seq,
                              " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                              field->prev_pid,
                              field->prev_prio,
                              S, delim,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T, comm))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
                                          int flags)
{
        return trace_ctxwake_print(iter, "  +");
}

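/*
 * Raw output for context-switch and wakeup events.  @S is '+' for a
 * wakeup; for a plain context switch it is passed as 0 and decoded
 * here from the previous task's state.
 */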
static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);
        if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                              field->prev_pid,
                              field->prev_prio,
                              S,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);

        SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_HEX_FIELD_RET(s, S);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
        SEQ_PUT_HEX_FIELD_RET(s, T);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_FIELD_RET(s, field->prev_state);
        SEQ_PUT_FIELD_RET(s, field->next_pid);
        SEQ_PUT_FIELD_RET(s, field->next_prio);
        SEQ_PUT_FIELD_RET(s, field->next_state);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
        .type           = TRACE_CTX,
        .trace          = trace_ctx_print,
        .raw            = trace_ctx_raw,
        .hex            = trace_ctx_hex,
        .binary         = trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
        .type           = TRACE_WAKE,
        .trace          = trace_wake_print,
        .raw            = trace_wake_raw,
        .hex            = trace_wake_hex,
        .binary         = trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
                                             int flags)
{
        struct special_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
                              field->arg1,
                              field->arg2,
                              field->arg3))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->arg1);
        SEQ_PUT_FIELD_RET(s, field->arg2);
        SEQ_PUT_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
        .type           = TRACE_SPECIAL,
        .trace          = trace_special_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_STACK */

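/*
 * Print a kernel stack trace entry, one " => symbol" line per stored
 * address, stopping at the first empty or ULONG_MAX slot.
 */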
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
                                           int flags)
{
        struct stack_entry *field;
        struct trace_seq *s = &iter->seq;
        int i;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_puts(s, "\n"))
                goto partial;
        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
                        break;
                if (!trace_seq_puts(s, " => "))
                        goto partial;

                if (!seq_print_ip_sym(s, field->caller[i], flags))
                        goto partial;
                if (!trace_seq_puts(s, "\n"))
                        goto partial;
        }

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
        .type           = TRACE_STACK,
        .trace          = trace_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
                                                int flags)
{
        struct userstack_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_putc(s, '\n'))
                goto partial;

        if (!seq_print_userip_objs(field, s, flags))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct bprint_entry *field;

        trace_assign_type(field, entry);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_puts(s, ": "))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
        struct bprint_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(s, ": %lx : ", field->ip))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
                                           int flags)
{
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_printf(s, ": %s", field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
        struct print_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};

static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_special_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};

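/*
 * Register the standard event printers above at boot so the output
 * code can find a print handler for every built-in entry type.
 */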
__init static int init_events(void)
{
        struct trace_event *event;
        int i, ret;

        for (i = 0; events[i]; i++) {
                event = events[i];

                ret = register_ftrace_event(event);
                if (!ret) {
                        printk(KERN_WARNING "event %d failed to register\n",
                               event->type);
                        WARN_ON_ONCE(1);
                }
        }

        return 0;
}
device_initcall(init_events);