4 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
8 #include <linux/module.h>
9 #include <linux/mutex.h>
10 #include <linux/ftrace.h>
12 #include "trace_output.h"
14 /* must be a power of 2 */
15 #define EVENT_HASHSIZE 128
17 static DECLARE_RWSEM(trace_event_mutex);
19 DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
20 EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
22 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
24 static int next_event_type = __TRACE_LAST_TYPE + 1;
26 void trace_print_seq(struct seq_file *m, struct trace_seq *s)
28 int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
31 seq_puts(m, s->buffer);
36 enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
38 struct trace_seq *s = &iter->seq;
39 struct trace_entry *entry = iter->ent;
40 struct bprint_entry *field;
43 trace_assign_type(field, entry);
45 ret = trace_seq_bprintf(s, field->fmt, field->buf);
47 return TRACE_TYPE_PARTIAL_LINE;
49 return TRACE_TYPE_HANDLED;
52 enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
54 struct trace_seq *s = &iter->seq;
55 struct trace_entry *entry = iter->ent;
56 struct print_entry *field;
59 trace_assign_type(field, entry);
61 ret = trace_seq_printf(s, "%s", field->buf);
63 return TRACE_TYPE_PARTIAL_LINE;
65 return TRACE_TYPE_HANDLED;
69 * trace_seq_printf - sequence printing of trace information
70 * @s: trace sequence descriptor
71 * @fmt: printf format string
73 * The tracer may use either sequence operations or its own
74 * copy to user routines. To simplify formatting of a trace,
75 * trace_seq_printf is used to store strings into a special
76 * buffer (@s). Then the output may be either used by
77 * the sequencer or pulled into another buffer.
80 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
82 int len = (PAGE_SIZE - 1) - s->len;
90 ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
93 /* If we can't write it all, don't bother writing anything */
101 EXPORT_SYMBOL_GPL(trace_seq_printf);
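/*
 * Illustrative sketch: a typical event print handler builds its line with
 * trace_seq_printf() and propagates a partial write so the output loop can
 * retry with more room.  The "my_entry" structure, its "value" field and
 * the handler name are hypothetical, made up for this example.
 */
#if 0
static enum print_line_t my_event_trace(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct my_entry *field;

	trace_assign_type(field, iter->ent);

	/* a zero return means the page-sized buffer filled up */
	if (!trace_seq_printf(s, "my_event: value=%lu\n", field->value))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#endif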
103 int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
105 int len = (PAGE_SIZE - 1) - s->len;
111 ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
113 /* If we can't write it all, don't bother writing anything */
123 * trace_seq_puts - trace sequence printing of simple string
124 * @s: trace sequence descriptor
125 * @str: simple string to record
127 * The tracer may use either the sequence operations or its own
128 * copy to user routines. This function records a simple string
129 * into a special buffer (@s) for later retrieval by a sequencer
130 * or other mechanism.
132 int trace_seq_puts(struct trace_seq *s, const char *str)
134 int len = strlen(str);
136 if (len > ((PAGE_SIZE - 1) - s->len))
139 memcpy(s->buffer + s->len, str, len);
145 int trace_seq_putc(struct trace_seq *s, unsigned char c)
147 if (s->len >= (PAGE_SIZE - 1))
150 s->buffer[s->len++] = c;
155 int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
157 if (len > ((PAGE_SIZE - 1) - s->len))
160 memcpy(s->buffer + s->len, mem, len);
166 int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
168 unsigned char hex[HEX_CHARS];
169 const unsigned char *data = mem;
172 #ifdef __BIG_ENDIAN
173 for (i = 0, j = 0; i < len; i++) {
174 #else
175 for (i = len-1, j = 0; i >= 0; i--) {
176 #endif
177 hex[j++] = hex_asc_hi(data[i]);
178 hex[j++] = hex_asc_lo(data[i]);
182 return trace_seq_putmem(s, hex, j);
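/*
 * Worked example: on a little-endian machine a two-byte value 0xabcd sits in
 * memory as cd ab, so the reverse walk above emits the digits as "abcd",
 * most-significant byte first.  The wrapper below is hypothetical.
 */
#if 0
static void my_put_hex(struct trace_seq *s)
{
	u16 val = 0xabcd;

	trace_seq_putmem_hex(s, &val, sizeof(val));	/* appends "abcd" */
}
#endif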
185 void *trace_seq_reserve(struct trace_seq *s, size_t len)
189 if (len > ((PAGE_SIZE - 1) - s->len))
192 ret = s->buffer + s->len;
198 int trace_seq_path(struct trace_seq *s, struct path *path)
202 if (s->len >= (PAGE_SIZE - 1))
204 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
206 p = mangle_path(s->buffer + s->len, p, "\n");
208 s->len = p - s->buffer;
212 s->buffer[s->len++] = '?';
220 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
222 const struct trace_print_flags *flag_array)
226 const char *ret = p->buffer + p->len;
229 for (i = 0; flag_array[i].name && flags; i++) {
231 mask = flag_array[i].mask;
232 if ((flags & mask) != mask)
235 str = flag_array[i].name;
238 trace_seq_puts(p, delim);
239 trace_seq_puts(p, str);
242 /* check for left over flags */
245 trace_seq_puts(p, delim);
246 trace_seq_printf(p, "0x%lx", flags);
249 trace_seq_putc(p, 0);
253 EXPORT_SYMBOL(ftrace_print_flags_seq);
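/*
 * Illustrative sketch: the flag walk above stops at a NULL name and prints
 * any leftover bits in hex.  The flag names, mask values and wrapper below
 * are hypothetical.
 */
#if 0
static const struct trace_print_flags my_rq_flags[] = {
	{ 0x01, "READ"  },
	{ 0x02, "WRITE" },
	{ 0x04, "SYNC"  },
	{ -1,   NULL    },	/* NULL name terminates the walk */
};

static void my_show_flags(struct trace_seq *p, unsigned long flags)
{
	/* flags == 0x05 prints "READ|SYNC"; 0x09 prints "READ|0x8" */
	ftrace_print_flags_seq(p, "|", flags, my_rq_flags);
}
#endif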
256 ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
257 const struct trace_print_flags *symbol_array)
260 const char *ret = p->buffer + p->len;
262 for (i = 0; symbol_array[i].name; i++) {
264 if (val != symbol_array[i].mask)
267 trace_seq_puts(p, symbol_array[i].name);
272 trace_seq_printf(p, "0x%lx", val);
274 trace_seq_putc(p, 0);
278 EXPORT_SYMBOL(ftrace_print_symbols_seq);
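/*
 * Illustrative sketch: unlike the flag helper, this one does an exact-match
 * lookup and falls back to printing the raw value in hex.  The state names,
 * values and wrapper below are hypothetical.
 */
#if 0
static const struct trace_print_flags my_states[] = {
	{ 0, "IDLE"    },
	{ 1, "RUNNING" },
	{ 2, "BLOCKED" },
	{ -1, NULL     },
};

static void my_show_state(struct trace_seq *p, unsigned long state)
{
	/* state == 1 prints "RUNNING"; state == 7 prints "0x7" */
	ftrace_print_symbols_seq(p, state, my_states);
}
#endif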
280 #ifdef CONFIG_KRETPROBES
281 static inline const char *kretprobed(const char *name)
283 static const char tramp_name[] = "kretprobe_trampoline";
284 int size = sizeof(tramp_name);
286 if (strncmp(tramp_name, name, size) == 0)
287 return "[unknown/kretprobe'd]";
291 static inline const char *kretprobed(const char *name)
295 #endif /* CONFIG_KRETPROBES */
298 seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
300 #ifdef CONFIG_KALLSYMS
301 char str[KSYM_SYMBOL_LEN];
304 kallsyms_lookup(address, NULL, NULL, NULL, str);
306 name = kretprobed(str);
308 return trace_seq_printf(s, fmt, name);
314 seq_print_sym_offset(struct trace_seq *s, const char *fmt,
315 unsigned long address)
317 #ifdef CONFIG_KALLSYMS
318 char str[KSYM_SYMBOL_LEN];
321 sprint_symbol(str, address);
322 name = kretprobed(str);
324 return trace_seq_printf(s, fmt, name);
330 # define IP_FMT "%08lx"
332 # define IP_FMT "%016lx"
335 int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
336 unsigned long ip, unsigned long sym_flags)
338 struct file *file = NULL;
339 unsigned long vmstart = 0;
343 const struct vm_area_struct *vma;
345 down_read(&mm->mmap_sem);
346 vma = find_vma(mm, ip);
349 vmstart = vma->vm_start;
352 ret = trace_seq_path(s, &file->f_path);
354 ret = trace_seq_printf(s, "[+0x%lx]",
357 up_read(&mm->mmap_sem);
359 if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
360 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
365 seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
366 unsigned long sym_flags)
368 struct mm_struct *mm = NULL;
372 if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
373 struct task_struct *task;
375 * we do the lookup on the thread group leader,
376 * since individual threads might have already quit!
379 task = find_task_by_vpid(entry->ent.tgid);
381 mm = get_task_mm(task);
385 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
386 unsigned long ip = entry->caller[i];
388 if (ip == ULONG_MAX || !ret)
391 ret = trace_seq_puts(s, " => ");
394 ret = trace_seq_puts(s, "??");
396 ret = trace_seq_puts(s, "\n");
402 ret = seq_print_user_ip(s, mm, ip, sym_flags);
403 ret = trace_seq_puts(s, "\n");
412 seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
417 return trace_seq_printf(s, "0");
419 if (sym_flags & TRACE_ITER_SYM_OFFSET)
420 ret = seq_print_sym_offset(s, "%s", ip);
422 ret = seq_print_sym_short(s, "%s", ip);
427 if (sym_flags & TRACE_ITER_SYM_ADDR)
428 ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
433 lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
435 int hardirq, softirq;
436 char comm[TASK_COMM_LEN];
438 trace_find_cmdline(entry->pid, comm);
439 hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
440 softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
442 if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
443 comm, entry->pid, cpu,
444 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
445 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
447 (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
449 (hardirq && softirq) ? 'H' :
450 hardirq ? 'h' : softirq ? 's' : '.'))
453 if (entry->preempt_count)
454 return trace_seq_printf(s, "%x", entry->preempt_count);
455 return trace_seq_puts(s, ".");
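/*
 * Illustrative example of the latency-format prefix built above: a line
 * starting with "bash-2132     1d.s." would mean command "bash", pid 2132,
 * cpu 1, interrupts disabled ('d'), no resched pending ('.'), inside a
 * softirq ('s'), and a preempt_count of zero ('.').
 */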
458 static unsigned long preempt_mark_thresh = 100;
461 lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
462 unsigned long rel_usecs)
464 return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
465 rel_usecs > preempt_mark_thresh ? '!' :
466 rel_usecs > 1 ? '+' : ' ');
469 int trace_print_context(struct trace_iterator *iter)
471 struct trace_seq *s = &iter->seq;
472 struct trace_entry *entry = iter->ent;
473 unsigned long long t = ns2usecs(iter->ts);
474 unsigned long usec_rem = do_div(t, USEC_PER_SEC);
475 unsigned long secs = (unsigned long)t;
476 char comm[TASK_COMM_LEN];
478 trace_find_cmdline(entry->pid, comm);
480 return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
481 comm, entry->pid, iter->cpu, secs, usec_rem);
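/*
 * Illustrative example: this builds the default per-line prefix seen in the
 * "trace" file, e.g.
 *
 *             bash-2132  [001]   123.456789:
 *
 * i.e. command, pid, cpu and the timestamp split into seconds.microseconds.
 */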
484 int trace_print_lat_context(struct trace_iterator *iter)
488 struct trace_seq *s = &iter->seq;
489 struct trace_entry *entry = iter->ent,
490 *next_entry = trace_find_next_entry(iter, NULL,
492 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
493 unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
494 unsigned long rel_usecs;
498 rel_usecs = ns2usecs(next_ts - iter->ts);
501 char comm[TASK_COMM_LEN];
503 trace_find_cmdline(entry->pid, comm);
505 ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
506 " %ld.%03ldms (+%ld.%03ldms): ", comm,
507 entry->pid, iter->cpu, entry->flags,
508 entry->preempt_count, iter->idx,
510 abs_usecs / USEC_PER_MSEC,
511 abs_usecs % USEC_PER_MSEC,
512 rel_usecs / USEC_PER_MSEC,
513 rel_usecs % USEC_PER_MSEC);
515 ret = lat_print_generic(s, entry, iter->cpu);
517 ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
523 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
525 static int task_state_char(unsigned long state)
527 int bit = state ? __ffs(state) + 1 : 0;
529 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
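/*
 * Illustrative sketch: with the usual TASK_STATE_TO_CHAR_STR of "RSDTtZX...",
 * a running task (state 0) maps to 'R', TASK_INTERRUPTIBLE (0x1) to 'S' and
 * TASK_UNINTERRUPTIBLE (0x2) to 'D'; a state bit beyond the string gives '?'.
 */
#if 0
static char my_state_example(void)
{
	return task_state_char(TASK_INTERRUPTIBLE);	/* 'S' */
}
#endif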
533 * ftrace_find_event - find a registered event
534 * @type: the type of event to look for
536 * Returns an event of type @type, otherwise NULL.
537 * Called with trace_event_read_lock() held.
539 struct trace_event *ftrace_find_event(int type)
541 struct trace_event *event;
542 struct hlist_node *n;
545 key = type & (EVENT_HASHSIZE - 1);
547 hlist_for_each_entry(event, n, &event_hash[key], node) {
548 if (event->type == type)
555 static LIST_HEAD(ftrace_event_list);
557 static int trace_search_list(struct list_head **list)
559 struct trace_event *e;
560 int last = __TRACE_LAST_TYPE;
562 if (list_empty(&ftrace_event_list)) {
563 *list = &ftrace_event_list;
568 * We have used up all possible max events;
569 * let's see if somebody freed one.
571 list_for_each_entry(e, &ftrace_event_list, list) {
572 if (e->type != last + 1)
577 /* Did we use up all 65 thousand events? */
578 if ((last + 1) > FTRACE_MAX_EVENT)
585 void trace_event_read_lock(void)
587 down_read(&trace_event_mutex);
590 void trace_event_read_unlock(void)
592 up_read(&trace_event_mutex);
596 * register_ftrace_event - register output for an event type
597 * @event: the event type to register
599 * Event types are stored in a hash and this hash is used to
600 * find a way to print an event. If the @event->type is set
601 * then it will use that type, otherwise it will assign a
602 * type to it.
604 * If you assign your own type, please make sure it is added
605 * to the trace_type enum in trace.h, to avoid collisions
606 * with the dynamic types.
608 * Returns the event type number or zero on error.
610 int register_ftrace_event(struct trace_event *event)
615 down_write(&trace_event_mutex);
620 INIT_LIST_HEAD(&event->list);
623 struct list_head *list = NULL;
625 if (next_event_type > FTRACE_MAX_EVENT) {
627 event->type = trace_search_list(&list);
633 event->type = next_event_type++;
634 list = &ftrace_event_list;
637 if (WARN_ON(ftrace_find_event(event->type)))
640 list_add_tail(&event->list, list);
642 } else if (event->type > __TRACE_LAST_TYPE) {
643 printk(KERN_WARNING "Need to add type to trace.h\n");
647 /* Is this event type already in use? */
648 if (ftrace_find_event(event->type))
652 if (event->trace == NULL)
653 event->trace = trace_nop_print;
654 if (event->raw == NULL)
655 event->raw = trace_nop_print;
656 if (event->hex == NULL)
657 event->hex = trace_nop_print;
658 if (event->binary == NULL)
659 event->binary = trace_nop_print;
661 key = event->type & (EVENT_HASHSIZE - 1);
663 hlist_add_head(&event->node, &event_hash[key]);
667 up_write(&trace_event_mutex);
671 EXPORT_SYMBOL_GPL(register_ftrace_event);
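/*
 * Illustrative sketch: a tracer that emits its own entries registers a
 * struct trace_event for them.  Leaving .type at zero requests the next free
 * dynamic type; callbacks left NULL fall back to trace_nop_print().  The
 * my_event_trace() handler and the init function are hypothetical.
 */
#if 0
static struct trace_event my_event = {
	.trace	= my_event_trace,
};

static int __init my_event_init(void)
{
	int type = register_ftrace_event(&my_event);

	return type ? 0 : -EBUSY;	/* zero means registration failed */
}
#endif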
674 * unregister_ftrace_event - remove a no longer used event
675 * @event: the event to remove
677 int unregister_ftrace_event(struct trace_event *event)
679 down_write(&trace_event_mutex);
680 hlist_del(&event->node);
681 list_del(&event->list);
682 up_write(&trace_event_mutex);
686 EXPORT_SYMBOL_GPL(unregister_ftrace_event);
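/*
 * Illustrative counterpart to the registration sketch above (hypothetical):
 * the owner must unregister before its struct trace_event goes away.
 */
#if 0
static void __exit my_event_exit(void)
{
	unregister_ftrace_event(&my_event);
}
#endif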
692 enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
694 return TRACE_TYPE_HANDLED;
698 static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
700 struct ftrace_entry *field;
701 struct trace_seq *s = &iter->seq;
703 trace_assign_type(field, iter->ent);
705 if (!seq_print_ip_sym(s, field->ip, flags))
708 if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
709 if (!trace_seq_printf(s, " <-"))
711 if (!seq_print_ip_sym(s,
716 if (!trace_seq_printf(s, "\n"))
719 return TRACE_TYPE_HANDLED;
722 return TRACE_TYPE_PARTIAL_LINE;
725 static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
727 struct ftrace_entry *field;
729 trace_assign_type(field, iter->ent);
731 if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
734 return TRACE_TYPE_PARTIAL_LINE;
736 return TRACE_TYPE_HANDLED;
739 static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
741 struct ftrace_entry *field;
742 struct trace_seq *s = &iter->seq;
744 trace_assign_type(field, iter->ent);
746 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
747 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
749 return TRACE_TYPE_HANDLED;
752 static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
754 struct ftrace_entry *field;
755 struct trace_seq *s = &iter->seq;
757 trace_assign_type(field, iter->ent);
759 SEQ_PUT_FIELD_RET(s, field->ip);
760 SEQ_PUT_FIELD_RET(s, field->parent_ip);
762 return TRACE_TYPE_HANDLED;
765 static struct trace_event trace_fn_event = {
767 .trace = trace_fn_trace,
770 .binary = trace_fn_bin,
773 /* TRACE_CTX and TRACE_WAKE */
774 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
777 struct ctx_switch_entry *field;
778 char comm[TASK_COMM_LEN];
782 trace_assign_type(field, iter->ent);
784 T = task_state_char(field->next_state);
785 S = task_state_char(field->prev_state);
786 trace_find_cmdline(field->next_pid, comm);
787 if (!trace_seq_printf(&iter->seq,
788 " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
796 return TRACE_TYPE_PARTIAL_LINE;
798 return TRACE_TYPE_HANDLED;
801 static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
803 return trace_ctxwake_print(iter, "==>");
806 static enum print_line_t trace_wake_print(struct trace_iterator *iter,
809 return trace_ctxwake_print(iter, " +");
812 static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
814 struct ctx_switch_entry *field;
817 trace_assign_type(field, iter->ent);
820 task_state_char(field->prev_state);
821 T = task_state_char(field->next_state);
822 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
830 return TRACE_TYPE_PARTIAL_LINE;
832 return TRACE_TYPE_HANDLED;
835 static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
837 return trace_ctxwake_raw(iter, 0);
840 static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
842 return trace_ctxwake_raw(iter, '+');
846 static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
848 struct ctx_switch_entry *field;
849 struct trace_seq *s = &iter->seq;
852 trace_assign_type(field, iter->ent);
855 task_state_char(field->prev_state);
856 T = task_state_char(field->next_state);
858 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
859 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
860 SEQ_PUT_HEX_FIELD_RET(s, S);
861 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
862 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
863 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
864 SEQ_PUT_HEX_FIELD_RET(s, T);
866 return TRACE_TYPE_HANDLED;
869 static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
871 return trace_ctxwake_hex(iter, 0);
874 static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
876 return trace_ctxwake_hex(iter, '+');
879 static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
882 struct ctx_switch_entry *field;
883 struct trace_seq *s = &iter->seq;
885 trace_assign_type(field, iter->ent);
887 SEQ_PUT_FIELD_RET(s, field->prev_pid);
888 SEQ_PUT_FIELD_RET(s, field->prev_prio);
889 SEQ_PUT_FIELD_RET(s, field->prev_state);
890 SEQ_PUT_FIELD_RET(s, field->next_pid);
891 SEQ_PUT_FIELD_RET(s, field->next_prio);
892 SEQ_PUT_FIELD_RET(s, field->next_state);
894 return TRACE_TYPE_HANDLED;
897 static struct trace_event trace_ctx_event = {
899 .trace = trace_ctx_print,
900 .raw = trace_ctx_raw,
901 .hex = trace_ctx_hex,
902 .binary = trace_ctxwake_bin,
905 static struct trace_event trace_wake_event = {
907 .trace = trace_wake_print,
908 .raw = trace_wake_raw,
909 .hex = trace_wake_hex,
910 .binary = trace_ctxwake_bin,
914 static enum print_line_t trace_special_print(struct trace_iterator *iter,
917 struct special_entry *field;
919 trace_assign_type(field, iter->ent);
921 if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
925 return TRACE_TYPE_PARTIAL_LINE;
927 return TRACE_TYPE_HANDLED;
930 static enum print_line_t trace_special_hex(struct trace_iterator *iter,
933 struct special_entry *field;
934 struct trace_seq *s = &iter->seq;
936 trace_assign_type(field, iter->ent);
938 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
939 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
940 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
942 return TRACE_TYPE_HANDLED;
945 static enum print_line_t trace_special_bin(struct trace_iterator *iter,
948 struct special_entry *field;
949 struct trace_seq *s = &iter->seq;
951 trace_assign_type(field, iter->ent);
953 SEQ_PUT_FIELD_RET(s, field->arg1);
954 SEQ_PUT_FIELD_RET(s, field->arg2);
955 SEQ_PUT_FIELD_RET(s, field->arg3);
957 return TRACE_TYPE_HANDLED;
960 static struct trace_event trace_special_event = {
961 .type = TRACE_SPECIAL,
962 .trace = trace_special_print,
963 .raw = trace_special_print,
964 .hex = trace_special_hex,
965 .binary = trace_special_bin,
970 static enum print_line_t trace_stack_print(struct trace_iterator *iter,
973 struct stack_entry *field;
974 struct trace_seq *s = &iter->seq;
977 trace_assign_type(field, iter->ent);
979 if (!trace_seq_puts(s, "<stack trace>\n"))
981 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
982 if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
984 if (!trace_seq_puts(s, " => "))
987 if (!seq_print_ip_sym(s, field->caller[i], flags))
989 if (!trace_seq_puts(s, "\n"))
993 return TRACE_TYPE_HANDLED;
996 return TRACE_TYPE_PARTIAL_LINE;
999 static struct trace_event trace_stack_event = {
1000 .type = TRACE_STACK,
1001 .trace = trace_stack_print,
1002 .raw = trace_special_print,
1003 .hex = trace_special_hex,
1004 .binary = trace_special_bin,
1007 /* TRACE_USER_STACK */
1008 static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
1011 struct userstack_entry *field;
1012 struct trace_seq *s = &iter->seq;
1014 trace_assign_type(field, iter->ent);
1016 if (!trace_seq_puts(s, "<user stack trace>\n"))
1019 if (!seq_print_userip_objs(field, s, flags))
1022 return TRACE_TYPE_HANDLED;
1025 return TRACE_TYPE_PARTIAL_LINE;
1028 static struct trace_event trace_user_stack_event = {
1029 .type = TRACE_USER_STACK,
1030 .trace = trace_user_stack_print,
1031 .raw = trace_special_print,
1032 .hex = trace_special_hex,
1033 .binary = trace_special_bin,
1037 static enum print_line_t
1038 trace_bprint_print(struct trace_iterator *iter, int flags)
1040 struct trace_entry *entry = iter->ent;
1041 struct trace_seq *s = &iter->seq;
1042 struct bprint_entry *field;
1044 trace_assign_type(field, entry);
1046 if (!seq_print_ip_sym(s, field->ip, flags))
1049 if (!trace_seq_puts(s, ": "))
1052 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1055 return TRACE_TYPE_HANDLED;
1058 return TRACE_TYPE_PARTIAL_LINE;
1062 static enum print_line_t
1063 trace_bprint_raw(struct trace_iterator *iter, int flags)
1065 struct bprint_entry *field;
1066 struct trace_seq *s = &iter->seq;
1068 trace_assign_type(field, iter->ent);
1070 if (!trace_seq_printf(s, ": %lx : ", field->ip))
1073 if (!trace_seq_bprintf(s, field->fmt, field->buf))
1076 return TRACE_TYPE_HANDLED;
1079 return TRACE_TYPE_PARTIAL_LINE;
1083 static struct trace_event trace_bprint_event = {
1084 .type = TRACE_BPRINT,
1085 .trace = trace_bprint_print,
1086 .raw = trace_bprint_raw,
1090 static enum print_line_t trace_print_print(struct trace_iterator *iter,
1093 struct print_entry *field;
1094 struct trace_seq *s = &iter->seq;
1096 trace_assign_type(field, iter->ent);
1098 if (!seq_print_ip_sym(s, field->ip, flags))
1101 if (!trace_seq_printf(s, ": %s", field->buf))
1104 return TRACE_TYPE_HANDLED;
1107 return TRACE_TYPE_PARTIAL_LINE;
1110 static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
1112 struct print_entry *field;
1114 trace_assign_type(field, iter->ent);
1116 if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
1119 return TRACE_TYPE_HANDLED;
1122 return TRACE_TYPE_PARTIAL_LINE;
1125 static struct trace_event trace_print_event = {
1126 .type = TRACE_PRINT,
1127 .trace = trace_print_print,
1128 .raw = trace_print_raw,
1132 static struct trace_event *events[] __initdata = {
1136 &trace_special_event,
1138 &trace_user_stack_event,
1139 &trace_bprint_event,
1144 __init static int init_events(void)
1146 struct trace_event *event;
1149 for (i = 0; events[i]; i++) {
1152 ret = register_ftrace_event(event);
1154 printk(KERN_WARNING "event %d failed to register\n",
1162 device_initcall(init_events);