tracing: annotate which type of stack trace is recorded
[linux-2.6] / kernel / trace / trace_output.c
1 /*
2  * trace_output.c
3  *
4  * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
11
12 #include "trace_output.h"
13
/* must be a power of 2 */
#define EVENT_HASHSIZE	128

/* Protects event_hash and the dynamic event-type bookkeeping below. */
static DECLARE_RWSEM(trace_event_mutex);

/* Per-cpu scratch trace_seq for event output formatting. */
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

/* Registered trace_events, hashed by type & (EVENT_HASHSIZE - 1). */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* Next dynamic event type id to hand out (above the static ids). */
static int next_event_type = __TRACE_LAST_TYPE + 1;
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
27 {
28         int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
29
30         s->buffer[len] = 0;
31         seq_puts(m, s->buffer);
32
33         trace_seq_init(s);
34 }
35
/*
 * Print only the message of a binary-printk (bprint) entry into
 * iter->seq, with no context prefix.  Returns TRACE_TYPE_PARTIAL_LINE
 * when the trace_seq buffer fills up.
 */
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	/* fmt/buf hold a pre-recorded binary printk; expand it now */
	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
51
52 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
53 {
54         struct trace_seq *s = &iter->seq;
55         struct trace_entry *entry = iter->ent;
56         struct print_entry *field;
57         int ret;
58
59         trace_assign_type(field, entry);
60
61         ret = trace_seq_printf(s, "%s", field->buf);
62         if (!ret)
63                 return TRACE_TYPE_PARTIAL_LINE;
64
65         return TRACE_TYPE_HANDLED;
66 }
67
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns 0 when the formatted text does not fit (in which case
 * nothing is committed to the buffer), otherwise the amount of
 * space that was free before the write.  Callers treat the value
 * as a boolean.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;	/* room left; one byte spared for NUL */
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
102
103 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
104 {
105         int len = (PAGE_SIZE - 1) - s->len;
106         int ret;
107
108         if (!len)
109                 return 0;
110
111         ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
112
113         /* If we can't write it all, don't bother writing anything */
114         if (ret >= len)
115                 return 0;
116
117         s->len += ret;
118
119         return len;
120 }
121
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 *
 * Returns the number of bytes copied, or 0 if the string did not
 * fit (nothing is committed in that case).
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}
144
145 int trace_seq_putc(struct trace_seq *s, unsigned char c)
146 {
147         if (s->len >= (PAGE_SIZE - 1))
148                 return 0;
149
150         s->buffer[s->len++] = c;
151
152         return 1;
153 }
154
155 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
156 {
157         if (len > ((PAGE_SIZE - 1) - s->len))
158                 return 0;
159
160         memcpy(s->buffer + s->len, mem, len);
161         s->len += len;
162
163         return len;
164 }
165
/*
 * Append @len bytes of @mem to @s as hex digits followed by a space.
 * On little endian the bytes are emitted highest-address first so a
 * multi-byte value reads naturally.
 * NOTE(review): there is no check that 2 * len + 1 fits in HEX_CHARS;
 * callers appear to pass only small fixed-size fields — confirm before
 * passing larger buffers.
 */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
184
185 void *trace_seq_reserve(struct trace_seq *s, size_t len)
186 {
187         void *ret;
188
189         if (len > ((PAGE_SIZE - 1) - s->len))
190                 return NULL;
191
192         ret = s->buffer + s->len;
193         s->len += len;
194
195         return ret;
196 }
197
/*
 * Append the pathname of @path to @s, with newline characters mangled
 * so the output stays on one line.  If d_path() fails a single '?' is
 * recorded instead.  Returns 1 on success, 0 if nothing was recorded.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path() builds the name at the tail of the buffer handed to it */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
218
/*
 * Decode @flags into @p as a @delim-separated list of names taken from
 * @flag_array; any remaining bits with no matching name are appended in
 * hex.  The result is NUL-terminated and a pointer to its first byte is
 * returned, so the caller can pass it straight to a "%s" format.
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;	/* start of our output */
	int i;

	for (i = 0;  flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;		/* consume the bits just named */
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
254
255 const char *
256 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
257                          const struct trace_print_flags *symbol_array)
258 {
259         int i;
260         const char *ret = p->buffer + p->len;
261
262         for (i = 0;  symbol_array[i].name; i++) {
263
264                 if (val != symbol_array[i].mask)
265                         continue;
266
267                 trace_seq_puts(p, symbol_array[i].name);
268                 break;
269         }
270
271         if (!p->len)
272                 trace_seq_printf(p, "0x%lx", val);
273                 
274         trace_seq_putc(p, 0);
275
276         return ret;
277 }
278 EXPORT_SYMBOL(ftrace_print_symbols_seq);
279
#ifdef CONFIG_KRETPROBES
/*
 * A kretprobe replaces a function's return address with its trampoline,
 * so a symbol that resolves to "kretprobe_trampoline" says nothing about
 * the real caller; substitute an explicit marker instead.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
296
/*
 * Print the bare symbol name of @address using @fmt.  Without
 * CONFIG_KALLSYMS there is nothing to resolve, so report success.
 * Returns 0 when the trace_seq fills up.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* hide kretprobe trampolines: they are not the real caller */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
312
/*
 * Like seq_print_sym_short(), but prints "symbol+offset/size" via
 * sprint_symbol().  Returns 0 when the trace_seq fills up.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	/* hide kretprobe trampolines: they are not the real caller */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
328
/* fixed-width hex format for instruction pointers */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

/*
 * Print a userspace address.  When the vma backing @ip can be found in
 * @mm, it is printed as "file[+offset]"; the raw address is appended
 * when TRACE_ITER_SYM_ADDR is set or when no file mapping was found.
 * Returns 0 when the trace_seq fills up.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
363
/*
 * Print every frame of a recorded user stack trace, one " => addr" line
 * per frame.  With TRACE_ITER_SYM_USEROBJ the owning task's mm is looked
 * up so each address can be resolved to "file[+offset]".  Returns 0 when
 * the trace_seq fills up.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX terminates the recorded frames early */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			/* a frame that could not be captured */
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}
410
411 int
412 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
413 {
414         int ret;
415
416         if (!ip)
417                 return trace_seq_printf(s, "0");
418
419         if (sym_flags & TRACE_ITER_SYM_OFFSET)
420                 ret = seq_print_sym_offset(s, "%s", ip);
421         else
422                 ret = seq_print_sym_short(s, "%s", ip);
423
424         if (!ret)
425                 return 0;
426
427         if (sym_flags & TRACE_ITER_SYM_ADDR)
428                 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
429         return ret;
430 }
431
/*
 * Emit the latency-format prefix for one entry: "comm-pid cpu" plus the
 * irqs-off, need-resched and irq-context flag characters, then the
 * preempt count ('.' when zero).  Returns 0 on buffer overflow.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	/* 'd' = irqs off, 'X' = unknown, 'N' = need resched,
	 * 'H' = in hardirq during softirq, 'h'/'s' = hard/soft irq */
	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
			      comm, entry->pid, cpu,
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}
457
/* gaps longer than this many usecs get flagged '!' in latency output */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print the "  123us+: " style timestamp: '!' marks a long gap to the
 * next entry, '+' a noticeable one, ' ' otherwise.
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
468
/*
 * Print the default (non-latency) entry prefix:
 * "comm-pid [cpu] secs.usecs: ".  Returns 0 on buffer overflow.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);	/* t becomes seconds */
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}
483
/*
 * Print the latency-trace prefix for the current entry: the verbose
 * fixed-column form when TRACE_ITER_VERBOSE is set, otherwise the
 * compact "comm-pid cpu flags timestamp" form.  rel_usecs is the gap
 * to the next entry and drives the '!'/'+' delay markers.
 * Returns 0 on buffer overflow.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* last entry: no successor, so the relative delay is zero */
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
522
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * Map a task state bitmask to its single-letter code.  Index 0 of the
 * string corresponds to state == 0 (running); otherwise the lowest set
 * bit selects the letter.  Unknown states print '?'.
 */
static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
531
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so this is a cheap modulo */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
554
/* Dynamic events, kept sorted by type so gaps can be found by a scan. */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free dynamic event type id by scanning the (type-ordered)
 * event list for the first gap.  On success *list is set to the node
 * to insert after and the new id is returned; 0 means every id is in
 * use.  Caller holds trace_event_mutex for writing.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
584
/* Readers of the event hash take trace_event_mutex shared. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}
594
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			/* simple ids exhausted: scan for a freed slot */
			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		/* a freshly assigned id already in the hash is a logic bug */
		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* fill in defaults so the output code never sees a NULL callback */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
672
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Removes @event from both the hash and the dynamic-type list.
 * Always returns 0.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	hlist_del(&event->node);
	list_del(&event->list);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
687
/*
 * Standard events
 */

/* Default output callback: consume the entry without printing anything. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
696
/* TRACE_FN */

/*
 * Human-readable function trace line: "ip <-parent_ip" with symbols
 * resolved as requested in @flags.
 */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
724
725 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
726 {
727         struct ftrace_entry *field;
728
729         trace_assign_type(field, iter->ent);
730
731         if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
732                               field->ip,
733                               field->parent_ip))
734                 return TRACE_TYPE_PARTIAL_LINE;
735
736         return TRACE_TYPE_HANDLED;
737 }
738
/* Hex output: ip and parent ip as raw hex fields. */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* the macro returns TRACE_TYPE_PARTIAL_LINE itself on overflow */
	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
751
/* Binary output: ip and parent ip copied verbatim. */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	/* the macro returns TRACE_TYPE_PARTIAL_LINE itself on overflow */
	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
764
/* Output callbacks for TRACE_FN function-trace entries. */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
772
/* TRACE_CTX and TRACE_WAKE */

/*
 * Shared printer for context-switch and wakeup entries; @delim between
 * the prev and next task distinguishes the two ("==>" vs "  +").
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
800
/* "==>" marks a context switch. */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

/* "  +" marks a wakeup. */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}
811
812 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
813 {
814         struct ctx_switch_entry *field;
815         int T;
816
817         trace_assign_type(field, iter->ent);
818
819         if (!S)
820                 task_state_char(field->prev_state);
821         T = task_state_char(field->next_state);
822         if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
823                               field->prev_pid,
824                               field->prev_prio,
825                               S,
826                               field->next_cpu,
827                               field->next_pid,
828                               field->next_prio,
829                               T))
830                 return TRACE_TYPE_PARTIAL_LINE;
831
832         return TRACE_TYPE_HANDLED;
833 }
834
/* Context switches derive the prev-state char from the entry (S == 0). */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

/* Wakeups always show '+' in the prev-state column. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}
844
845
846 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
847 {
848         struct ctx_switch_entry *field;
849         struct trace_seq *s = &iter->seq;
850         int T;
851
852         trace_assign_type(field, iter->ent);
853
854         if (!S)
855                 task_state_char(field->prev_state);
856         T = task_state_char(field->next_state);
857
858         SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
859         SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
860         SEQ_PUT_HEX_FIELD_RET(s, S);
861         SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
862         SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
863         SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
864         SEQ_PUT_HEX_FIELD_RET(s, T);
865
866         return TRACE_TYPE_HANDLED;
867 }
868
/* Context switches derive the prev-state char from the entry (S == 0). */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

/* Wakeups always show '+' in the prev-state column. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
878
/*
 * Binary output for context-switch/wakeup entries.  Raw state values
 * are copied (not state chars), so no wakeup marker is needed and both
 * event types share this callback.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}
896
/* Output callbacks for TRACE_CTX (sched switch) entries. */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* Output callbacks for TRACE_WAKE (sched wakeup) entries. */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
912
913 /* TRACE_SPECIAL */
914 static enum print_line_t trace_special_print(struct trace_iterator *iter,
915                                              int flags)
916 {
917         struct special_entry *field;
918
919         trace_assign_type(field, iter->ent);
920
921         if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
922                               field->arg1,
923                               field->arg2,
924                               field->arg3))
925                 return TRACE_TYPE_PARTIAL_LINE;
926
927         return TRACE_TYPE_HANDLED;
928 }
929
/* Hex output for the three arguments of a special entry. */
static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
944
/* Binary output for the three arguments of a special entry. */
static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
959
/* Output callbacks for TRACE_SPECIAL entries (raw shares the text form). */
static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
967
/* TRACE_STACK */

/*
 * Print a kernel stack trace entry as a "<stack trace>" header followed
 * by one " => symbol" line per frame.  A zero or ULONG_MAX slot in the
 * fixed-size caller array terminates the frames.
 */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	int i;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;
	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
			break;
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, field->caller[i], flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
998
/* Output callbacks for TRACE_STACK entries (raw/hex/bin reuse special). */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
1006
1007 /* TRACE_USER_STACK */
1008 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1009                                                 int flags)
1010 {
1011         struct userstack_entry *field;
1012         struct trace_seq *s = &iter->seq;
1013
1014         trace_assign_type(field, iter->ent);
1015
1016         if (!trace_seq_puts(s, "<user stack trace>\n"))
1017                 goto partial;
1018
1019         if (!seq_print_userip_objs(field, s, flags))
1020                 goto partial;
1021
1022         return TRACE_TYPE_HANDLED;
1023
1024  partial:
1025         return TRACE_TYPE_PARTIAL_LINE;
1026 }
1027
/*
 * Output callbacks for TRACE_USER_STACK entries.  As with TRACE_STACK,
 * raw/hex/binary reuse the trace_special_* helpers on the entry's
 * leading words.
 */
static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};
1035
1036 /* TRACE_BPRINT */
1037 static enum print_line_t
1038 trace_bprint_print(struct trace_iterator *iter, int flags)
1039 {
1040         struct trace_entry *entry = iter->ent;
1041         struct trace_seq *s = &iter->seq;
1042         struct bprint_entry *field;
1043
1044         trace_assign_type(field, entry);
1045
1046         if (!seq_print_ip_sym(s, field->ip, flags))
1047                 goto partial;
1048
1049         if (!trace_seq_puts(s, ": "))
1050                 goto partial;
1051
1052         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1053                 goto partial;
1054
1055         return TRACE_TYPE_HANDLED;
1056
1057  partial:
1058         return TRACE_TYPE_PARTIAL_LINE;
1059 }
1060
1061
1062 static enum print_line_t
1063 trace_bprint_raw(struct trace_iterator *iter, int flags)
1064 {
1065         struct bprint_entry *field;
1066         struct trace_seq *s = &iter->seq;
1067
1068         trace_assign_type(field, iter->ent);
1069
1070         if (!trace_seq_printf(s, ": %lx : ", field->ip))
1071                 goto partial;
1072
1073         if (!trace_seq_bprintf(s, field->fmt, field->buf))
1074                 goto partial;
1075
1076         return TRACE_TYPE_HANDLED;
1077
1078  partial:
1079         return TRACE_TYPE_PARTIAL_LINE;
1080 }
1081
1082
/*
 * Output callbacks for TRACE_BPRINT entries.  .hex and .binary are left
 * unset here — presumably register_ftrace_event() supplies defaults for
 * missing callbacks (NOTE(review): confirm against the registration code).
 */
static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};
1088
1089 /* TRACE_PRINT */
1090 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1091                                            int flags)
1092 {
1093         struct print_entry *field;
1094         struct trace_seq *s = &iter->seq;
1095
1096         trace_assign_type(field, iter->ent);
1097
1098         if (!seq_print_ip_sym(s, field->ip, flags))
1099                 goto partial;
1100
1101         if (!trace_seq_printf(s, ": %s", field->buf))
1102                 goto partial;
1103
1104         return TRACE_TYPE_HANDLED;
1105
1106  partial:
1107         return TRACE_TYPE_PARTIAL_LINE;
1108 }
1109
1110 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1111 {
1112         struct print_entry *field;
1113
1114         trace_assign_type(field, iter->ent);
1115
1116         if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1117                 goto partial;
1118
1119         return TRACE_TYPE_HANDLED;
1120
1121  partial:
1122         return TRACE_TYPE_PARTIAL_LINE;
1123 }
1124
/*
 * Output callbacks for TRACE_PRINT entries.  Like trace_bprint_event,
 * only .trace and .raw are provided explicitly.
 */
static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};
1130
1131
/*
 * NULL-terminated table of the built-in trace events registered at
 * boot by init_events().  __initdata: discarded after init.
 */
static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_special_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};
1143
1144 __init static int init_events(void)
1145 {
1146         struct trace_event *event;
1147         int i, ret;
1148
1149         for (i = 0; events[i]; i++) {
1150                 event = events[i];
1151
1152                 ret = register_ftrace_event(event);
1153                 if (!ret) {
1154                         printk(KERN_WARNING "event %d failed to register\n",
1155                                event->type);
1156                         WARN_ON_ONCE(1);
1157                 }
1158         }
1159
1160         return 0;
1161 }
1162 device_initcall(init_events);