ftrace: function tracer with irqs disabled
linux-2.6: kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"

#define TRACE_BUFFER_FLAGS      (RB_FL_OVERWRITE)

unsigned long __read_mostly     tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly     tracing_thresh;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
        preempt_disable();
        local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
        local_dec(&__get_cpu_var(ftrace_cpu_disabled));
        preempt_enable();
}

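/*
 * Illustrative usage of the pair above (a sketch, not part of the
 * original file): any code path that touches the ring buffer from
 * inside the tracer wraps the access so the function tracer cannot
 * recurse into the buffer it is resetting or reading:
 *
 *      ftrace_disable_cpu();
 *      ring_buffer_reset(tr->buffer);
 *      ftrace_enable_cpu();
 *
 * See tracing_reset() and update_max_tr() below for real callers.
 */
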
static cpumask_t __read_mostly          tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)       \
        for_each_cpu_mask(cpu, tracing_buffer_mask)

static int tracing_disabled = 1;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(char *buf);

static int __init set_ftrace(char *str)
{
        tracing_set_tracer(str);
        return 1;
}
__setup("ftrace", set_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
        ftrace_dump_on_oops = 1;
        return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

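/*
 * Example (illustrative, based on the description above): the dump
 * can be enabled on the boot command line or at run time:
 *
 *      ftrace_dump_on_oops                             (command line)
 *      echo 1 > /proc/sys/kernel/ftrace_dump_on_oops   (run time)
 */
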
long
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

cycle_t ftrace_now(int cpu)
{
        u64 ts = ring_buffer_time_stamp(cpu);
        ring_buffer_normalize_time_stamp(cpu, &ts);
        return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of each page in memory is used to hold the
 * linked list, by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so that tracing can continue.
 */
static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int                      tracer_enabled = 1;

/* function tracing enabled */
int                             ftrace_function_enabled;

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384 entries.
 * If a dump on oops happens, it is much appreciated not to have to
 * wait for all that output. Anyway, this is configurable at both
 * boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer            *current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify allocating buffers
 * when reading tracer names from userspace. We keep track of
 * the longest registered tracer name.
 */
static int                      max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
        /*
         * The runqueue_is_locked() can fail, but this is the best we
         * have for now:
         */
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
                wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;
        int ret;

        if (!str)
                return 0;
        ret = strict_strtoul(str, 0, &buf_size);
        /* nr_entries can not be zero */
        if (ret < 0 || buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);

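/*
 * Example (illustrative): request a 1MB trace buffer on the kernel
 * command line; the value is in bytes and is rounded to page size,
 * as noted above:
 *
 *      trace_buf_size=1048576
 */
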
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "sched-tree",
        "ftrace_printk",
        "ftrace_preempt",
        NULL
};

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

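/*
 * Illustrative usage (a sketch; print_trace_fmt() below follows this
 * exact pattern): output routines build a line into the page-sized
 * seq buffer and bail out when a zero return says the line no longer
 * fits:
 *
 *      if (!trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid))
 *              return TRACE_TYPE_PARTIAL_LINE;
 */
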
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

#define MAX_MEMHEX_BYTES        8
#define HEX_CHARS               (MAX_MEMHEX_BYTES*2 + 1)

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        unsigned char *data = mem;
        int i, j;

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                hex[j++] = hex_asc_hi(data[i]);
                hex[j++] = hex_asc_lo(data[i]);
        }
        hex[j++] = ' ';

        return trace_seq_putmem(s, hex, j);
}

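/*
 * Worked example: on a little-endian machine a two-byte field holding
 * 0x1234 is stored in memory as { 0x34, 0x12 }.  The loop above walks
 * it from the highest address down, emitting "1234 ", so the value
 * reads naturally regardless of host byte order.
 */
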
static void
trace_seq_reset(struct trace_seq *s)
{
        s->len = 0;
        s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret)
                return -EFAULT;

        s->readpos += cnt;
        return cnt;
}

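/*
 * Illustrative usage (a sketch, assuming a read() handler draining
 * the seq buffer to userspace in cnt-sized chunks):
 *
 *      ret = trace_seq_to_user(&iter->seq, ubuf, cnt);
 *
 * A return of -EBUSY means everything up to s->len has already been
 * handed out, so the caller should refill the buffer first.
 */
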
static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf = tr->buffer;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;

        ftrace_disable_cpu();
        ring_buffer_reset(tr->buffer);
        ftrace_enable_cpu();

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        ftrace_disable_cpu();

        ring_buffer_reset(max_tr.buffer);
        ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

        ftrace_enable_cpu();

        WARN_ON_ONCE(ret);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_tracing_cpu(i) {
                        tracing_reset(tr, i);
                }
                current_trace = type;
                tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_tracing_cpu(i) {
                        tracing_reset(tr, i);
                }
                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

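/*
 * Illustrative registration (a sketch; of the members touched by
 * register_tracer() above only ->name is mandatory and ->selftest is
 * optional, so the other tracer callbacks are omitted here):
 *
 *      static struct tracer nop_tracer __read_mostly = {
 *              .name   = "nop",
 *      };
 *
 *      static int __init init_nop_tracer(void)
 *      {
 *              return register_tracer(&nop_tracer);
 *      }
 *      device_initcall(init_nop_tracer);
 */
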
void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
        ftrace_disable_cpu();
        ring_buffer_reset_cpu(tr->buffer, cpu);
        ftrace_enable_cpu();
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

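/*
 * Example of what the above packs into entry->flags: an event logged
 * from a hardirq handler with interrupts disabled and a preempt_count
 * of 1 gets TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ plus
 * preempt_count == 1, which lat_print_generic() below renders as
 * "d.h1".
 */
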
void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long irq_flags;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_FN;
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}

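/*
 * The reserve/commit sequence above is the template for every event
 * type in this file: reserve space in the ring buffer, fill in the
 * type-specific entry, then commit.  A sketch for a hypothetical new
 * event type (TRACE_MYEVENT and its entry are assumptions, not part
 * of this file):
 *
 *      event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 *                                       &irq_flags);
 *      if (!event)
 *              return;
 *      entry = ring_buffer_event_data(event);
 *      tracing_generic_entry_update(&entry->ent, flags, pc);
 *      entry->ent.type = TRACE_MYEVENT;
 *      ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 */
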
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void ftrace_trace_stack(struct trace_array *tr,
                               struct trace_array_cpu *data,
                               unsigned long flags,
                               int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
        unsigned long irq_flags;

        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type         = TRACE_STACK;

        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = skip;
        trace.entries           = entry->caller;

        save_stack_trace(&trace);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip)
{
        ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
                     int pc)
{
        struct ring_buffer_event *event;
        struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct special_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, pc);
        entry->ent.type                 = TRACE_SPECIAL;
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, irq_flags, 4, pc);

        trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                           &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_CTX;
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 5, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                           &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_WAKE;
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 6, pc);

        trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        int cpu;
        int pc;

        if (tracing_disabled || !tr->ctrl)
                return;

        pc = preempt_count();
        preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

        preempt_enable_notrace();
}

#ifdef CONFIG_FUNCTION_TRACER
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

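/*
 * Note on ftrace_preempt_disable()/ftrace_preempt_enable() (defined
 * in trace.h): they behave like preempt_disable()/preempt_enable(),
 * but remember whether NEED_RESCHED was already set.  If it was, the
 * enable side skips the reschedule, because this callback may run
 * inside the scheduler itself and calling back into it would recurse.
 */
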
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        raw_local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, data, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        register_ftrace_function(&trace_ops);
        if (tracer_enabled)
                ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
};

static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        iter->idx++;
        if (iter->buffer_iter[iter->cpu])
                ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

        ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
                event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

        ftrace_enable_cpu();

        return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
        struct ring_buffer *buffer = iter->tr->buffer;
        struct trace_entry *ent, *next = NULL;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
        int cpu;

        for_each_tracing_cpu(cpu) {

                if (ring_buffer_empty_cpu(buffer, cpu))
                        continue;

                ent = peek_next_entry(iter, cpu, &ts);

                /*
                 * Pick the entry with the smallest timestamp:
                 */
                if (ent && (!next || ts < next_ts)) {
                        next = ent;
                        next_cpu = cpu;
                        next_ts = ts;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        if (ent_ts)
                *ent_ts = next_ts;

        return next;
}

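/*
 * Example: if the per-CPU buffers' next entries carry timestamps
 * 1005, 1002 and 1009, __find_next_entry() returns the entry stamped
 * 1002; repeated calls therefore interleave the per-CPU streams into
 * one globally time-ordered sequence.
 */
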
/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
        return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

        if (iter->ent)
                trace_iterator_increment(iter, iter->cpu);

        return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
        ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int cpu;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace) {
                mutex_unlock(&trace_types_lock);
                return NULL;
        }

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                ftrace_disable_cpu();

                for_each_tracing_cpu(cpu) {
                        ring_buffer_iter_reset(iter->buffer_iter[cpu]);
                }

                ftrace_enable_cpu();

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}

#define KRETPROBE_MSG "[unknown/kretprobe'd]"

#ifdef CONFIG_KRETPROBES
static inline int kretprobed(unsigned long addr)
{
        return addr == (unsigned long)kretprobe_trampoline;
}
#else
static inline int kretprobed(unsigned long addr)
{
        return 0;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

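/*
 * Example output for an ip resolving to schedule() (addresses and
 * offsets illustrative):
 *
 *      schedule                                (default)
 *      schedule+0x10/0x120                     (sym-offset)
 *      schedule <ffffffff80230000>             (sym-addr)
 */
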
static void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                  _------=> CPU#            \n");
        seq_puts(m, "#                 / _-----=> irqs-off        \n");
        seq_puts(m, "#                | / _----=> need-resched    \n");
        seq_puts(m, "#                || / _---=> hardirq/softirq \n");
        seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#                |||| /                      \n");
        seq_puts(m, "#                |||||     delay             \n");
        seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
}


static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total;
        unsigned long entries;
        const char *name = "preemption";

        if (type)
                name = type->name;

        entries = ring_buffer_entries(iter->tr->buffer);
        total = entries +
                ring_buffer_overruns(iter->tr->buffer);

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(entry->pid);

        trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
        trace_seq_printf(s, "%3d", cpu);
        trace_seq_printf(s, "%c%c",
                        (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                         (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
                        ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq) {
                trace_seq_putc(s, 'H');
        } else {
                if (hardirq) {
                        trace_seq_putc(s, 'h');
                } else {
                        if (softirq)
                                trace_seq_putc(s, 's');
                        else
                                trace_seq_putc(s, '.');
                }
        }

        if (entry->preempt_count)
                trace_seq_printf(s, "%x", entry->preempt_count);
        else
                trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
                    unsigned long rel_usecs)
{
        trace_seq_printf(s, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                trace_seq_puts(s, "!: ");
        else if (rel_usecs > 1)
                trace_seq_puts(s, "+: ");
        else
                trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * The message is supposed to contain an ending newline.
 * If the printing stops prematurely, try to add a newline of our own.
 */
void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
{
        struct trace_entry *ent;
        struct trace_field_cont *cont;
        bool ok = true;

        ent = peek_next_entry(iter, iter->cpu, NULL);
        if (!ent || ent->type != TRACE_CONT) {
                trace_seq_putc(s, '\n');
                return;
        }

        do {
                cont = (struct trace_field_cont *)ent;
                if (ok)
                        ok = (trace_seq_printf(s, "%s", cont->buf) > 0);

                ftrace_disable_cpu();

                if (iter->buffer_iter[iter->cpu])
                        ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
                else
                        ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);

                ftrace_enable_cpu();

                ent = peek_next_entry(iter, iter->cpu, NULL);
        } while (ent && ent->type == TRACE_CONT);

        if (!ok)
                trace_seq_putc(s, '\n');
}

static enum print_line_t
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry;
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        u64 next_ts;
        char *comm;
        int S, T;
        int i;
        unsigned state;

        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;

        next_entry = find_next_entry(iter, NULL, &next_ts);
        if (!next_entry)
                next_ts = iter->ts;
        rel_usecs = ns2usecs(next_ts - iter->ts);
        abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(entry->pid);
                trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
                                 " %ld.%03ldms (+%ld.%03ldms): ",
                                 comm,
                                 entry->pid, cpu, entry->flags,
                                 entry->preempt_count, trace_idx,
                                 ns2usecs(iter->ts),
                                 abs_usecs/1000,
                                 abs_usecs % 1000, rel_usecs/1000,
                                 rel_usecs % 1000);
        } else {
                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN: {
                struct ftrace_entry *field;

                trace_assign_type(field, entry);

                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_puts(s, " (");
                if (kretprobed(field->parent_ip))
                        trace_seq_puts(s, KRETPROBE_MSG);
                else
                        seq_print_ip_sym(s, field->parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
                struct ctx_switch_entry *field;

                trace_assign_type(field, entry);

                T = field->next_state < sizeof(state_to_char) ?
                        state_to_char[field->next_state] : 'X';

                state = field->prev_state ?
                        __ffs(field->prev_state) + 1 : 0;
                S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
                comm = trace_find_cmdline(field->next_pid);
                trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                                 field->prev_pid,
                                 field->prev_prio,
                                 S, entry->type == TRACE_CTX ? "==>" : "  +",
                                 field->next_cpu,
                                 field->next_pid,
                                 field->next_prio,
                                 T, comm);
                break;
        }
        case TRACE_SPECIAL: {
                struct special_entry *field;

                trace_assign_type(field, entry);

                trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->arg1,
                                 field->arg2,
                                 field->arg3);
                break;
        }
        case TRACE_STACK: {
                struct stack_entry *field;

                trace_assign_type(field, entry);

                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i)
                                trace_seq_puts(s, " <= ");
                        seq_print_ip_sym(s, field->caller[i], sym_flags);
                }
                trace_seq_puts(s, "\n");
                break;
        }
        case TRACE_PRINT: {
                struct print_entry *field;

                trace_assign_type(field, entry);

                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_printf(s, ": %s", field->buf);
                if (entry->flags & TRACE_FLAG_CONT)
                        trace_seq_print_cont(s, iter);
                break;
        }
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int ret;
        int S, T;
        int i;

        entry = iter->ent;

        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(iter->ts);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (entry->type) {
        case TRACE_FN: {
                struct ftrace_entry *field;

                trace_assign_type(field, entry);

                ret = seq_print_ip_sym(s, field->ip, sym_flags);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                                                field->parent_ip) {
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                        if (kretprobed(field->parent_ip))
                                ret = trace_seq_puts(s, KRETPROBE_MSG);
                        else
                                ret = seq_print_ip_sym(s,
                                                       field->parent_ip,
                                                       sym_flags);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                ret = trace_seq_printf(s, "\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                break;
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
                struct ctx_switch_entry *field;

                trace_assign_type(field, entry);

                S = field->prev_state < sizeof(state_to_char) ?
                        state_to_char[field->prev_state] : 'X';
                T = field->next_state < sizeof(state_to_char) ?
                        state_to_char[field->next_state] : 'X';
                ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
                                       field->prev_pid,
                                       field->prev_prio,
                                       S,
                                       entry->type == TRACE_CTX ? "==>" : "  +",
                                       field->next_cpu,
                                       field->next_pid,
                                       field->next_prio,
                                       T);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                break;
        }
        case TRACE_SPECIAL: {
                struct special_entry *field;

                trace_assign_type(field, entry);

                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->arg1,
                                 field->arg2,
                                 field->arg3);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                break;
        }
        case TRACE_STACK: {
                struct stack_entry *field;

                trace_assign_type(field, entry);

                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i) {
                                ret = trace_seq_puts(s, " <= ");
                                if (!ret)
                                        return TRACE_TYPE_PARTIAL_LINE;
                        }
                        ret = seq_print_ip_sym(s, field->caller[i],
                                               sym_flags);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
                ret = trace_seq_puts(s, "\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                break;
        }
        case TRACE_PRINT: {
                struct print_entry *field;

                trace_assign_type(field, entry);

                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_printf(s, ": %s", field->buf);
                if (entry->flags & TRACE_FLAG_CONT)
                        trace_seq_print_cont(s, iter);
                break;
        }
        }
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
        int ret;
        int S, T;

        entry = iter->ent;

        if (entry->type == TRACE_CONT)
                return TRACE_TYPE_HANDLED;

        ret = trace_seq_printf(s, "%d %d %llu ",
                entry->pid, iter->cpu, iter->ts);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (entry->type) {
        case TRACE_FN: {
                struct ftrace_entry *field;

                trace_assign_type(field, entry);

1673                 ret = trace_seq_printf(s, "%x %x\n",
1674                                         field->ip,
1675                                         field->parent_ip);
1676                 if (!ret)
1677                         return TRACE_TYPE_PARTIAL_LINE;
1678                 break;
1679         }
1680         case TRACE_CTX:
1681         case TRACE_WAKE: {
1682                 struct ctx_switch_entry *field;
1683
1684                 trace_assign_type(field, entry);
1685
1686                 S = field->prev_state < sizeof(state_to_char) ?
1687                         state_to_char[field->prev_state] : 'X';
1688                 T = field->next_state < sizeof(state_to_char) ?
1689                         state_to_char[field->next_state] : 'X';
1690                 if (entry->type == TRACE_WAKE)
1691                         S = '+';
1692                 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1693                                        field->prev_pid,
1694                                        field->prev_prio,
1695                                        S,
1696                                        field->next_cpu,
1697                                        field->next_pid,
1698                                        field->next_prio,
1699                                        T);
1700                 if (!ret)
1701                         return TRACE_TYPE_PARTIAL_LINE;
1702                 break;
1703         }
1704         case TRACE_SPECIAL:
1705         case TRACE_STACK: {
1706                 struct special_entry *field;
1707
1708                 trace_assign_type(field, entry);
1709
1710                 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1711                                  field->arg1,
1712                                  field->arg2,
1713                                  field->arg3);
1714                 if (!ret)
1715                         return TRACE_TYPE_PARTIAL_LINE;
1716                 break;
1717         }
1718         case TRACE_PRINT: {
1719                 struct print_entry *field;
1720
1721                 trace_assign_type(field, entry);
1722
1723                 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
1724                 if (entry->flags & TRACE_FLAG_CONT)
1725                         trace_seq_print_cont(s, iter);
1726                 break;
1727         }
1728         }
1729         return TRACE_TYPE_HANDLED;
1730 }
1731
1732 #define SEQ_PUT_FIELD_RET(s, x)                         \
1733 do {                                                    \
1734         if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
1735                 return 0;                               \
1736 } while (0)
1737
1738 #define SEQ_PUT_HEX_FIELD_RET(s, x)                     \
1739 do {                                                    \
1740         BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
1741         if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
1742                 return 0;                               \
1743 } while (0)
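/*
 * For illustration, SEQ_PUT_HEX_FIELD_RET(s, entry->pid) expands
 * roughly to:
 *
 *	do {
 *		BUILD_BUG_ON(sizeof(entry->pid) > MAX_MEMHEX_BYTES);
 *		if (!trace_seq_putmem_hex(s, &(entry->pid), sizeof(entry->pid)))
 *			return 0;
 *	} while (0);
 *
 * so each field either lands in the seq buffer or aborts the printer
 * early (the 0 reads as TRACE_TYPE_PARTIAL_LINE in the printers below).
 */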
1744
1745 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1746 {
1747         struct trace_seq *s = &iter->seq;
1748         unsigned char newline = '\n';
1749         struct trace_entry *entry;
1750         int S, T;
1751
1752         entry = iter->ent;
1753
1754         if (entry->type == TRACE_CONT)
1755                 return TRACE_TYPE_HANDLED;
1756
1757         SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1758         SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1759         SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1760
1761         switch (entry->type) {
1762         case TRACE_FN: {
1763                 struct ftrace_entry *field;
1764
1765                 trace_assign_type(field, entry);
1766
1767                 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1768                 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1769                 break;
1770         }
1771         case TRACE_CTX:
1772         case TRACE_WAKE: {
1773                 struct ctx_switch_entry *field;
1774
1775                 trace_assign_type(field, entry);
1776
1777                 S = field->prev_state < sizeof(state_to_char) ?
1778                         state_to_char[field->prev_state] : 'X';
1779                 T = field->next_state < sizeof(state_to_char) ?
1780                         state_to_char[field->next_state] : 'X';
1781                 if (entry->type == TRACE_WAKE)
1782                         S = '+';
1783                 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1784                 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1785                 SEQ_PUT_HEX_FIELD_RET(s, S);
1786                 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1787                 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1788                 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1789                 SEQ_PUT_HEX_FIELD_RET(s, T);
1790                 break;
1791         }
1792         case TRACE_SPECIAL:
1793         case TRACE_STACK: {
1794                 struct special_entry *field;
1795
1796                 trace_assign_type(field, entry);
1797
1798                 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1799                 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1800                 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
1801                 break;
1802         }
1803         }
1804         SEQ_PUT_FIELD_RET(s, newline);
1805
1806         return TRACE_TYPE_HANDLED;
1807 }
1808
1809 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1810 {
1811         struct trace_seq *s = &iter->seq;
1812         struct trace_entry *entry;
1813
1814         entry = iter->ent;
1815
1816         if (entry->type == TRACE_CONT)
1817                 return TRACE_TYPE_HANDLED;
1818
1819         SEQ_PUT_FIELD_RET(s, entry->pid);
1820         SEQ_PUT_FIELD_RET(s, iter->cpu);
1821         SEQ_PUT_FIELD_RET(s, iter->ts);
1822
1823         switch (entry->type) {
1824         case TRACE_FN: {
1825                 struct ftrace_entry *field;
1826
1827                 trace_assign_type(field, entry);
1828
1829                 SEQ_PUT_FIELD_RET(s, field->ip);
1830                 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1831                 break;
1832         }
1833         case TRACE_CTX: {
1834                 struct ctx_switch_entry *field;
1835
1836                 trace_assign_type(field, entry);
1837
1838                 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1839                 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1840                 SEQ_PUT_FIELD_RET(s, field->prev_state);
1841                 SEQ_PUT_FIELD_RET(s, field->next_pid);
1842                 SEQ_PUT_FIELD_RET(s, field->next_prio);
1843                 SEQ_PUT_FIELD_RET(s, field->next_state);
1844                 break;
1845         }
1846         case TRACE_SPECIAL:
1847         case TRACE_STACK: {
1848                 struct special_entry *field;
1849
1850                 trace_assign_type(field, entry);
1851
1852                 SEQ_PUT_FIELD_RET(s, field->arg1);
1853                 SEQ_PUT_FIELD_RET(s, field->arg2);
1854                 SEQ_PUT_FIELD_RET(s, field->arg3);
1855                 break;
1856         }
1857         }
1858         return TRACE_TYPE_HANDLED;
1859 }
1860
1861 static int trace_empty(struct trace_iterator *iter)
1862 {
1863         int cpu;
1864
1865         for_each_tracing_cpu(cpu) {
1866                 if (iter->buffer_iter[cpu]) {
1867                         if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1868                                 return 0;
1869                 } else {
1870                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1871                                 return 0;
1872                 }
1873         }
1874
1875         return 1;
1876 }
1877
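/*
 * Dispatch one entry to the right output format. A tracer's own
 * print_line() callback gets first refusal; otherwise the trace
 * flags pick the format, with binary taking precedence over hex,
 * hex over raw, and raw over the latency and default text formats.
 */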
1878 static enum print_line_t print_trace_line(struct trace_iterator *iter)
1879 {
1880         enum print_line_t ret;
1881
1882         if (iter->trace && iter->trace->print_line) {
1883                 ret = iter->trace->print_line(iter);
1884                 if (ret != TRACE_TYPE_UNHANDLED)
1885                         return ret;
1886         }
1887
1888         if (trace_flags & TRACE_ITER_BIN)
1889                 return print_bin_fmt(iter);
1890
1891         if (trace_flags & TRACE_ITER_HEX)
1892                 return print_hex_fmt(iter);
1893
1894         if (trace_flags & TRACE_ITER_RAW)
1895                 return print_raw_fmt(iter);
1896
1897         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1898                 return print_lat_fmt(iter, iter->idx, iter->cpu);
1899
1900         return print_trace_fmt(iter);
1901 }
1902
1903 static int s_show(struct seq_file *m, void *v)
1904 {
1905         struct trace_iterator *iter = v;
1906
1907         if (iter->ent == NULL) {
1908                 if (iter->tr) {
1909                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
1910                         seq_puts(m, "#\n");
1911                 }
1912                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1913                         /* print nothing if the buffers are empty */
1914                         if (trace_empty(iter))
1915                                 return 0;
1916                         print_trace_header(m, iter);
1917                         if (!(trace_flags & TRACE_ITER_VERBOSE))
1918                                 print_lat_help_header(m);
1919                 } else {
1920                         if (!(trace_flags & TRACE_ITER_VERBOSE))
1921                                 print_func_help_header(m);
1922                 }
1923         } else {
1924                 print_trace_line(iter);
1925                 trace_print_seq(m, &iter->seq);
1926         }
1927
1928         return 0;
1929 }
1930
1931 static struct seq_operations tracer_seq_ops = {
1932         .start          = s_start,
1933         .next           = s_next,
1934         .stop           = s_stop,
1935         .show           = s_show,
1936 };
1937
1938 static struct trace_iterator *
1939 __tracing_open(struct inode *inode, struct file *file, int *ret)
1940 {
1941         struct trace_iterator *iter;
1942         struct seq_file *m;
1943         int cpu;
1944
1945         if (tracing_disabled) {
1946                 *ret = -ENODEV;
1947                 return NULL;
1948         }
1949
1950         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1951         if (!iter) {
1952                 *ret = -ENOMEM;
1953                 goto out;
1954         }
1955
1956         mutex_lock(&trace_types_lock);
1957         if (current_trace && current_trace->print_max)
1958                 iter->tr = &max_tr;
1959         else
1960                 iter->tr = inode->i_private;
1961         iter->trace = current_trace;
1962         iter->pos = -1;
1963
1964         for_each_tracing_cpu(cpu) {
1965
1966                 iter->buffer_iter[cpu] =
1967                         ring_buffer_read_start(iter->tr->buffer, cpu);
1968
1969                 if (!iter->buffer_iter[cpu])
1970                         goto fail_buffer;
1971         }
1972
1973         /* TODO stop tracer */
1974         *ret = seq_open(file, &tracer_seq_ops);
1975         if (*ret)
1976                 goto fail_buffer;
1977
1978         m = file->private_data;
1979         m->private = iter;
1980
1981         /* stop the trace while dumping */
1982         if (iter->tr->ctrl) {
1983                 tracer_enabled = 0;
1984                 ftrace_function_enabled = 0;
1985         }
1986
1987         if (iter->trace && iter->trace->open)
1988                 iter->trace->open(iter);
1989
1990         mutex_unlock(&trace_types_lock);
1991
1992  out:
1993         return iter;
1994
1995  fail_buffer:
1996         for_each_tracing_cpu(cpu) {
1997                 if (iter->buffer_iter[cpu])
1998                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
1999         }
2000         mutex_unlock(&trace_types_lock);
             kfree(iter);
             *ret = -ENOMEM;
2001
2002         return ERR_PTR(-ENOMEM);
2003 }
2004
2005 int tracing_open_generic(struct inode *inode, struct file *filp)
2006 {
2007         if (tracing_disabled)
2008                 return -ENODEV;
2009
2010         filp->private_data = inode->i_private;
2011         return 0;
2012 }
2013
2014 int tracing_release(struct inode *inode, struct file *file)
2015 {
2016         struct seq_file *m = (struct seq_file *)file->private_data;
2017         struct trace_iterator *iter = m->private;
2018         int cpu;
2019
2020         mutex_lock(&trace_types_lock);
2021         for_each_tracing_cpu(cpu) {
2022                 if (iter->buffer_iter[cpu])
2023                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2024         }
2025
2026         if (iter->trace && iter->trace->close)
2027                 iter->trace->close(iter);
2028
2029         /* reenable tracing if it was previously enabled */
2030         if (iter->tr->ctrl) {
2031                 tracer_enabled = 1;
2032                 /*
2033                  * It is safe to enable function tracing even if it
2034                  * isn't used
2035                  */
2036                 ftrace_function_enabled = 1;
2037         }
2038         mutex_unlock(&trace_types_lock);
2039
2040         seq_release(inode, file);
2041         kfree(iter);
2042         return 0;
2043 }
2044
2045 static int tracing_open(struct inode *inode, struct file *file)
2046 {
2047         int ret;
2048
2049         __tracing_open(inode, file, &ret);
2050
2051         return ret;
2052 }
2053
2054 static int tracing_lt_open(struct inode *inode, struct file *file)
2055 {
2056         struct trace_iterator *iter;
2057         int ret;
2058
2059         iter = __tracing_open(inode, file, &ret);
2060
2061         if (!ret)
2062                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2063
2064         return ret;
2065 }
2066
2068 static void *
2069 t_next(struct seq_file *m, void *v, loff_t *pos)
2070 {
2071         struct tracer *t = m->private;
2072
2073         (*pos)++;
2074
2075         if (t)
2076                 t = t->next;
2077
2078         m->private = t;
2079
2080         return t;
2081 }
2082
2083 static void *t_start(struct seq_file *m, loff_t *pos)
2084 {
2085         struct tracer *t = m->private;
2086         loff_t l = 0;
2087
2088         mutex_lock(&trace_types_lock);
2089         for (; t && l < *pos; t = t_next(m, t, &l))
2090                 ;
2091
2092         return t;
2093 }
2094
2095 static void t_stop(struct seq_file *m, void *p)
2096 {
2097         mutex_unlock(&trace_types_lock);
2098 }
2099
2100 static int t_show(struct seq_file *m, void *v)
2101 {
2102         struct tracer *t = v;
2103
2104         if (!t)
2105                 return 0;
2106
2107         seq_printf(m, "%s", t->name);
2108         if (t->next)
2109                 seq_putc(m, ' ');
2110         else
2111                 seq_putc(m, '\n');
2112
2113         return 0;
2114 }
2115
2116 static struct seq_operations show_traces_seq_ops = {
2117         .start          = t_start,
2118         .next           = t_next,
2119         .stop           = t_stop,
2120         .show           = t_show,
2121 };
2122
2123 static int show_traces_open(struct inode *inode, struct file *file)
2124 {
2125         int ret;
2126
2127         if (tracing_disabled)
2128                 return -ENODEV;
2129
2130         ret = seq_open(file, &show_traces_seq_ops);
2131         if (!ret) {
2132                 struct seq_file *m = file->private_data;
2133                 m->private = trace_types;
2134         }
2135
2136         return ret;
2137 }
2138
2139 static struct file_operations tracing_fops = {
2140         .open           = tracing_open,
2141         .read           = seq_read,
2142         .llseek         = seq_lseek,
2143         .release        = tracing_release,
2144 };
2145
2146 static struct file_operations tracing_lt_fops = {
2147         .open           = tracing_lt_open,
2148         .read           = seq_read,
2149         .llseek         = seq_lseek,
2150         .release        = tracing_release,
2151 };
2152
2153 static struct file_operations show_traces_fops = {
2154         .open           = show_traces_open,
2155         .read           = seq_read,
2156         .release        = seq_release,
2157 };
2158
2159 /*
2160  * Only trace on a CPU if the bitmask is set:
2161  */
2162 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2163
2164 /*
2165  * When tracing/tracing_cpu_mask is modified then this holds
2166  * the new bitmask we are about to install:
2167  */
2168 static cpumask_t tracing_cpumask_new;
2169
2170 /*
2171  * The tracer itself will not take this lock, but still we want
2172  * to provide a consistent cpumask to user-space:
2173  */
2174 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2175
2176 /*
2177  * Temporary storage for the character representation of the
2178  * CPU bitmask (and one more byte for the newline):
2179  */
2180 static char mask_str[NR_CPUS + 1];
2181
2182 static ssize_t
2183 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2184                      size_t count, loff_t *ppos)
2185 {
2186         int len;
2187
2188         mutex_lock(&tracing_cpumask_update_lock);
2189
2190         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2191         if (count - len < 2) {
2192                 count = -EINVAL;
2193                 goto out_err;
2194         }
2195         len += sprintf(mask_str + len, "\n");
2196         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2197
2198 out_err:
2199         mutex_unlock(&tracing_cpumask_update_lock);
2200
2201         return count;
2202 }
2203
2204 static ssize_t
2205 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2206                       size_t count, loff_t *ppos)
2207 {
2208         int err, cpu;
2209
2210         mutex_lock(&tracing_cpumask_update_lock);
2211         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2212         if (err)
2213                 goto err_unlock;
2214
2215         raw_local_irq_disable();
2216         __raw_spin_lock(&ftrace_max_lock);
2217         for_each_tracing_cpu(cpu) {
2218                 /*
2219                  * Increase/decrease the disabled counter if we are
2220                  * about to flip a bit in the cpumask:
2221                  */
2222                 if (cpu_isset(cpu, tracing_cpumask) &&
2223                                 !cpu_isset(cpu, tracing_cpumask_new)) {
2224                         atomic_inc(&global_trace.data[cpu]->disabled);
2225                 }
2226                 if (!cpu_isset(cpu, tracing_cpumask) &&
2227                                 cpu_isset(cpu, tracing_cpumask_new)) {
2228                         atomic_dec(&global_trace.data[cpu]->disabled);
2229                 }
2230         }
2231         __raw_spin_unlock(&ftrace_max_lock);
2232         raw_local_irq_enable();
2233
2234         tracing_cpumask = tracing_cpumask_new;
2235
2236         mutex_unlock(&tracing_cpumask_update_lock);
2237
2238         return count;
2239
2240 err_unlock:
2241         mutex_unlock(&tracing_cpumask_update_lock);
2242
2243         return err;
2244 }
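/*
 * A user-space caller might restrict tracing to CPUs 0-1 by writing
 * a hex mask, e.g. (a sketch; the helper name is made up and the
 * path assumes debugfs mounted at /debug as in the mini-HOWTO below):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int restrict_tracing_cpus(void)
 *	{
 *		int fd = open("/debug/tracing/tracing_cpumask", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		// "3" sets bit 0 (CPU0) and bit 1 (CPU1)
 *		if (write(fd, "3\n", 2) != 2) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */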
2245
2246 static struct file_operations tracing_cpumask_fops = {
2247         .open           = tracing_open_generic,
2248         .read           = tracing_cpumask_read,
2249         .write          = tracing_cpumask_write,
2250 };
2251
2252 static ssize_t
2253 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2254                        size_t cnt, loff_t *ppos)
2255 {
2256         char *buf;
2257         int r = 0;
2258         int len = 0;
2259         int i;
2260
2261         /* calculate max size */
2262         for (i = 0; trace_options[i]; i++) {
2263                 len += strlen(trace_options[i]);
2264                 len += 3; /* "no" and space */
2265         }
2266
2267         /* +2 for \n and \0 */
2268         buf = kmalloc(len + 2, GFP_KERNEL);
2269         if (!buf)
2270                 return -ENOMEM;
2271
2272         for (i = 0; trace_options[i]; i++) {
2273                 if (trace_flags & (1 << i))
2274                         r += sprintf(buf + r, "%s ", trace_options[i]);
2275                 else
2276                         r += sprintf(buf + r, "no%s ", trace_options[i]);
2277         }
2278
2279         r += sprintf(buf + r, "\n");
2280         WARN_ON(r >= len + 2);
2281
2282         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2283
2284         kfree(buf);
2285
2286         return r;
2287 }
2288
2289 static ssize_t
2290 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2291                         size_t cnt, loff_t *ppos)
2292 {
2293         char buf[64];
2294         char *cmp = buf;
2295         int neg = 0;
2296         int i;
2297
2298         if (cnt >= sizeof(buf))
2299                 return -EINVAL;
2300
2301         if (copy_from_user(&buf, ubuf, cnt))
2302                 return -EFAULT;
2303
2304         buf[cnt] = 0;
2305
2306         if (strncmp(buf, "no", 2) == 0) {
2307                 neg = 1;
2308                 cmp += 2;
2309         }
2310
2311         for (i = 0; trace_options[i]; i++) {
2312                 int len = strlen(trace_options[i]);
2313
2314                 if (strncmp(cmp, trace_options[i], len) == 0) {
2315                         if (neg)
2316                                 trace_flags &= ~(1 << i);
2317                         else
2318                                 trace_flags |= (1 << i);
2319                         break;
2320                 }
2321         }
2322         /*
2323          * If no option could be set, return an error:
2324          */
2325         if (!trace_options[i])
2326                 return -EINVAL;
2327
2328         filp->f_pos += cnt;
2329
2330         return cnt;
2331 }
2332
2333 static struct file_operations tracing_iter_fops = {
2334         .open           = tracing_open_generic,
2335         .read           = tracing_iter_ctrl_read,
2336         .write          = tracing_iter_ctrl_write,
2337 };
2338
2339 static const char readme_msg[] =
2340         "tracing mini-HOWTO:\n\n"
2341         "# mkdir /debug\n"
2342         "# mount -t debugfs nodev /debug\n\n"
2343         "# cat /debug/tracing/available_tracers\n"
2344         "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2345         "# cat /debug/tracing/current_tracer\n"
2346         "none\n"
2347         "# echo sched_switch > /debug/tracing/current_tracer\n"
2348         "# cat /debug/tracing/current_tracer\n"
2349         "sched_switch\n"
2350         "# cat /debug/tracing/iter_ctrl\n"
2351         "noprint-parent nosym-offset nosym-addr noverbose\n"
2352         "# echo print-parent > /debug/tracing/iter_ctrl\n"
2353         "# echo 1 > /debug/tracing/tracing_enabled\n"
2354         "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2355         "echo 0 > /debug/tracing/tracing_enabled\n"
2356 ;
2357
2358 static ssize_t
2359 tracing_readme_read(struct file *filp, char __user *ubuf,
2360                        size_t cnt, loff_t *ppos)
2361 {
2362         return simple_read_from_buffer(ubuf, cnt, ppos,
2363                                         readme_msg, strlen(readme_msg));
2364 }
2365
2366 static struct file_operations tracing_readme_fops = {
2367         .open           = tracing_open_generic,
2368         .read           = tracing_readme_read,
2369 };
2370
2371 static ssize_t
2372 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2373                   size_t cnt, loff_t *ppos)
2374 {
2375         struct trace_array *tr = filp->private_data;
2376         char buf[64];
2377         int r;
2378
2379         r = sprintf(buf, "%ld\n", tr->ctrl);
2380         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2381 }
2382
2383 static ssize_t
2384 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2385                    size_t cnt, loff_t *ppos)
2386 {
2387         struct trace_array *tr = filp->private_data;
2388         char buf[64];
2389         long val;
2390         int ret;
2391
2392         if (cnt >= sizeof(buf))
2393                 return -EINVAL;
2394
2395         if (copy_from_user(&buf, ubuf, cnt))
2396                 return -EFAULT;
2397
2398         buf[cnt] = 0;
2399
2400         ret = strict_strtoul(buf, 10, &val);
2401         if (ret < 0)
2402                 return ret;
2403
2404         val = !!val;
2405
2406         mutex_lock(&trace_types_lock);
2407         if (tr->ctrl ^ val) {
2408                 if (val)
2409                         tracer_enabled = 1;
2410                 else
2411                         tracer_enabled = 0;
2412
2413                 tr->ctrl = val;
2414
2415                 if (current_trace && current_trace->ctrl_update)
2416                         current_trace->ctrl_update(tr);
2417         }
2418         mutex_unlock(&trace_types_lock);
2419
2420         filp->f_pos += cnt;
2421
2422         return cnt;
2423 }
2424
2425 static ssize_t
2426 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2427                        size_t cnt, loff_t *ppos)
2428 {
2429         char buf[max_tracer_type_len+2];
2430         int r;
2431
2432         mutex_lock(&trace_types_lock);
2433         if (current_trace)
2434                 r = sprintf(buf, "%s\n", current_trace->name);
2435         else
2436                 r = sprintf(buf, "\n");
2437         mutex_unlock(&trace_types_lock);
2438
2439         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2440 }
2441
2442 static int tracing_set_tracer(char *buf)
2443 {
2444         struct trace_array *tr = &global_trace;
2445         struct tracer *t;
2446         int ret = 0;
2447
2448         mutex_lock(&trace_types_lock);
2449         for (t = trace_types; t; t = t->next) {
2450                 if (strcmp(t->name, buf) == 0)
2451                         break;
2452         }
2453         if (!t) {
2454                 ret = -EINVAL;
2455                 goto out;
2456         }
2457         if (t == current_trace)
2458                 goto out;
2459
2460         if (current_trace && current_trace->reset)
2461                 current_trace->reset(tr);
2462
2463         current_trace = t;
2464         if (t->init)
2465                 t->init(tr);
2466
2467  out:
2468         mutex_unlock(&trace_types_lock);
2469
2470         return ret;
2471 }
2472
2473 static ssize_t
2474 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2475                         size_t cnt, loff_t *ppos)
2476 {
2477         char buf[max_tracer_type_len+1];
2478         int i;
2479         size_t ret;
2480
2481         if (cnt > max_tracer_type_len)
2482                 cnt = max_tracer_type_len;
2483
2484         if (copy_from_user(&buf, ubuf, cnt))
2485                 return -EFAULT;
2486
2487         buf[cnt] = 0;
2488
2489         /* strip ending whitespace. */
2490         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2491                 buf[i] = 0;
2492
2493         ret = tracing_set_tracer(buf);
2494         if (!ret)
2495                 ret = cnt;
2496
2497         if (ret > 0)
2498                 filp->f_pos += ret;
2499
2500         return ret;
2501 }
2502
2503 static ssize_t
2504 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2505                      size_t cnt, loff_t *ppos)
2506 {
2507         unsigned long *ptr = filp->private_data;
2508         char buf[64];
2509         int r;
2510
2511         r = snprintf(buf, sizeof(buf), "%ld\n",
2512                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2513         if (r > sizeof(buf))
2514                 r = sizeof(buf);
2515         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2516 }
2517
2518 static ssize_t
2519 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2520                       size_t cnt, loff_t *ppos)
2521 {
2522         long *ptr = filp->private_data;
2523         char buf[64];
2524         long val;
2525         int ret;
2526
2527         if (cnt >= sizeof(buf))
2528                 return -EINVAL;
2529
2530         if (copy_from_user(&buf, ubuf, cnt))
2531                 return -EFAULT;
2532
2533         buf[cnt] = 0;
2534
2535         ret = strict_strtoul(buf, 10, &val);
2536         if (ret < 0)
2537                 return ret;
2538
2539         *ptr = val * 1000;
2540
2541         return cnt;
2542 }
2543
2544 static atomic_t tracing_reader;
2545
2546 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2547 {
2548         struct trace_iterator *iter;
2549
2550         if (tracing_disabled)
2551                 return -ENODEV;
2552
2553         /* We only allow one reader of the pipe */
2554         if (atomic_inc_return(&tracing_reader) != 1) {
2555                 atomic_dec(&tracing_reader);
2556                 return -EBUSY;
2557         }
2558
2559         /* create a buffer to store the information to pass to userspace */
2560         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2561         if (!iter)
2562                 return -ENOMEM;
2563
2564         mutex_lock(&trace_types_lock);
2565         iter->tr = &global_trace;
2566         iter->trace = current_trace;
2567         filp->private_data = iter;
2568
2569         if (iter->trace->pipe_open)
2570                 iter->trace->pipe_open(iter);
2571         mutex_unlock(&trace_types_lock);
2572
2573         return 0;
2574 }
2575
2576 static int tracing_release_pipe(struct inode *inode, struct file *file)
2577 {
2578         struct trace_iterator *iter = file->private_data;
2579
2580         kfree(iter);
2581         atomic_dec(&tracing_reader);
2582
2583         return 0;
2584 }
2585
2586 static unsigned int
2587 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2588 {
2589         struct trace_iterator *iter = filp->private_data;
2590
2591         if (trace_flags & TRACE_ITER_BLOCK) {
2592                 /*
2593                  * Always select as readable when in blocking mode
2594                  */
2595                 return POLLIN | POLLRDNORM;
2596         } else {
2597                 if (!trace_empty(iter))
2598                         return POLLIN | POLLRDNORM;
2599                 poll_wait(filp, &trace_wait, poll_table);
2600                 if (!trace_empty(iter))
2601                         return POLLIN | POLLRDNORM;
2602
2603                 return 0;
2604         }
2605 }
2606
2607 /*
2608  * Consumer reader.
2609  */
2610 static ssize_t
2611 tracing_read_pipe(struct file *filp, char __user *ubuf,
2612                   size_t cnt, loff_t *ppos)
2613 {
2614         struct trace_iterator *iter = filp->private_data;
2615         ssize_t sret;
2616
2617         /* return any leftover data */
2618         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2619         if (sret != -EBUSY)
2620                 return sret;
2621
2622         trace_seq_reset(&iter->seq);
2623
2624         mutex_lock(&trace_types_lock);
2625         if (iter->trace->read) {
2626                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2627                 if (sret)
2628                         goto out;
2629         }
2630
2631 waitagain:
2632         sret = 0;
2633         while (trace_empty(iter)) {
2634
2635                 if ((filp->f_flags & O_NONBLOCK)) {
2636                         sret = -EAGAIN;
2637                         goto out;
2638                 }
2639
2640                 /*
2641                  * This is a make-shift waitqueue. The reason we don't use
2642                  * an actual wait queue is because:
2643                  *  1) we only ever have one waiter
2644                  *  2) the tracer traces all functions, so we don't want
2645                  *     the overhead of calling wake_up and friends
2646                  *     (and tracing them too)
2647                  *     Anyway, this is a really primitive wakeup.
2648                  */
2649                 set_current_state(TASK_INTERRUPTIBLE);
2650                 iter->tr->waiter = current;
2651
2652                 mutex_unlock(&trace_types_lock);
2653
2654                 /* sleep for 100 msecs, and try again. */
2655                 schedule_timeout(HZ/10);
2656
2657                 mutex_lock(&trace_types_lock);
2658
2659                 iter->tr->waiter = NULL;
2660
2661                 if (signal_pending(current)) {
2662                         sret = -EINTR;
2663                         goto out;
2664                 }
2665
2666                 if (iter->trace != current_trace)
2667                         goto out;
2668
2669                 /*
2670                  * We block until we read something and tracing is disabled.
2671                  * We still block if tracing is disabled, but we have never
2672                  * read anything. This allows a user to cat this file, and
2673                  * then enable tracing. But after we have read something,
2674                  * we give an EOF when tracing is again disabled.
2675                  *
2676                  * iter->pos will be 0 if we haven't read anything.
2677                  */
2678                 if (!tracer_enabled && iter->pos)
2679                         break;
2680
2681                 continue;
2682         }
2683
2684         /* stop when tracing is finished */
2685         if (trace_empty(iter))
2686                 goto out;
2687
2688         if (cnt >= PAGE_SIZE)
2689                 cnt = PAGE_SIZE - 1;
2690
2691         /* reset all but tr, trace, and overruns */
2692         memset(&iter->seq, 0,
2693                sizeof(struct trace_iterator) -
2694                offsetof(struct trace_iterator, seq));
2695         iter->pos = -1;
2696
2697         while (find_next_entry_inc(iter) != NULL) {
2698                 enum print_line_t ret;
2699                 int len = iter->seq.len;
2700
2701                 ret = print_trace_line(iter);
2702                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2703                         /* don't print partial lines */
2704                         iter->seq.len = len;
2705                         break;
2706                 }
2707
2708                 trace_consume(iter);
2709
2710                 if (iter->seq.len >= cnt)
2711                         break;
2712         }
2713
2714         /* Now copy what we have to the user */
2715         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2716         if (iter->seq.readpos >= iter->seq.len)
2717                 trace_seq_reset(&iter->seq);
2718
2719         /*
2720          * If there was nothing to send to the user, despite consuming
2721          * trace entries, go back and wait for more entries.
2722          */
2723         if (sret == -EBUSY)
2724                 goto waitagain;
2725
2726 out:
2727         mutex_unlock(&trace_types_lock);
2728
2729         return sret;
2730 }
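/*
 * A minimal user-space consumer for trace_pipe might pair poll()
 * with read() as the handlers above expect (a sketch; the path
 * assumes debugfs mounted at /debug as in the mini-HOWTO above).
 * Only one process may hold the pipe open at a time, and whatever
 * is read here is consumed from the ring buffer:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd;
 *		ssize_t n;
 *
 *		pfd.fd = open("/debug/tracing/trace_pipe", O_RDONLY);
 *		if (pfd.fd < 0)
 *			return 1;
 *		pfd.events = POLLIN;
 *
 *		for (;;) {
 *			if (poll(&pfd, 1, -1) < 0)
 *				break;
 *			n = read(pfd.fd, buf, sizeof(buf));
 *			if (n <= 0)
 *				break;
 *			fwrite(buf, 1, n, stdout);
 *		}
 *		close(pfd.fd);
 *		return 0;
 *	}
 */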
2731
2732 static ssize_t
2733 tracing_entries_read(struct file *filp, char __user *ubuf,
2734                      size_t cnt, loff_t *ppos)
2735 {
2736         struct trace_array *tr = filp->private_data;
2737         char buf[64];
2738         int r;
2739
2740         r = sprintf(buf, "%lu\n", tr->entries);
2741         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2742 }
2743
2744 static ssize_t
2745 tracing_entries_write(struct file *filp, const char __user *ubuf,
2746                       size_t cnt, loff_t *ppos)
2747 {
2748         unsigned long val;
2749         char buf[64];
2750         int ret;
2751         struct trace_array *tr = filp->private_data;
2752
2753         if (cnt >= sizeof(buf))
2754                 return -EINVAL;
2755
2756         if (copy_from_user(&buf, ubuf, cnt))
2757                 return -EFAULT;
2758
2759         buf[cnt] = 0;
2760
2761         ret = strict_strtoul(buf, 10, &val);
2762         if (ret < 0)
2763                 return ret;
2764
2765         /* must have at least 1 entry */
2766         if (!val)
2767                 return -EINVAL;
2768
2769         mutex_lock(&trace_types_lock);
2770
2771         if (tr->ctrl) {
2772                 cnt = -EBUSY;
2773                 pr_info("ftrace: please disable tracing"
2774                         " before modifying buffer size\n");
2775                 goto out;
2776         }
2777
2778         if (val != global_trace.entries) {
2779                 ret = ring_buffer_resize(global_trace.buffer, val);
2780                 if (ret < 0) {
2781                         cnt = ret;
2782                         goto out;
2783                 }
2784
2785                 ret = ring_buffer_resize(max_tr.buffer, val);
2786                 if (ret < 0) {
2787                         int r;
2788                         cnt = ret;
2789                         r = ring_buffer_resize(global_trace.buffer,
2790                                                global_trace.entries);
2791                         if (r < 0) {
2792                         /* AARGH! The resize back failed and we are
2793                          * left with buffers of different sizes! */
2794                                 WARN_ON(1);
2795                                 tracing_disabled = 1;
2796                         }
2797                         goto out;
2798                 }
2799
2800                 global_trace.entries = val;
2801         }
2802
2803         filp->f_pos += cnt;
2804
2805         /* If the failed resize disabled tracing, return ENOMEM */
2806         if (tracing_disabled)
2807                 cnt = -ENOMEM;
2808  out:
2809         max_tr.entries = global_trace.entries;
2810         mutex_unlock(&trace_types_lock);
2811
2812         return cnt;
2813 }
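/*
 * Resizing only works while tracing is off (see the tr->ctrl check
 * above), so a typical sequence mirrors the mini-HOWTO:
 *
 *	# echo 0 > /debug/tracing/tracing_enabled
 *	# echo 65536 > /debug/tracing/trace_entries
 *	# echo 1 > /debug/tracing/tracing_enabled
 */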
2814
2815 static int mark_printk(const char *fmt, ...)
2816 {
2817         int ret;
2818         va_list args;
2819         va_start(args, fmt);
2820         ret = trace_vprintk(0, fmt, args);
2821         va_end(args);
2822         return ret;
2823 }
2824
2825 static ssize_t
2826 tracing_mark_write(struct file *filp, const char __user *ubuf,
2827                                         size_t cnt, loff_t *fpos)
2828 {
2829         char *buf;
2830         char *end;
2831         struct trace_array *tr = &global_trace;
2832
2833         if (!tr->ctrl || tracing_disabled)
2834                 return -EINVAL;
2835
2836         if (cnt > TRACE_BUF_SIZE)
2837                 cnt = TRACE_BUF_SIZE;
2838
2839         buf = kmalloc(cnt + 1, GFP_KERNEL);
2840         if (buf == NULL)
2841                 return -ENOMEM;
2842
2843         if (copy_from_user(buf, ubuf, cnt)) {
2844                 kfree(buf);
2845                 return -EFAULT;
2846         }
2847
2848         /* Cut at the first NUL or newline. */
2849         buf[cnt] = '\0';
2850         end = strchr(buf, '\n');
2851         if (end)
2852                 *end = '\0';
2853
2854         cnt = mark_printk("%s\n", buf);
2855         kfree(buf);
2856         *fpos += cnt;
2857
2858         return cnt;
2859 }
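/*
 * User space can drop a marker into the trace via trace_marker; the
 * string comes back out of the printers above as a TRACE_PRINT
 * entry. A sketch (the helper name is made up; tracing must be
 * enabled or the write fails with -EINVAL):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void trace_mark(const char *msg)
 *	{
 *		int fd = open("/debug/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd < 0)
 *			return;
 *		write(fd, msg, strlen(msg));
 *		close(fd);
 *	}
 */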
2860
2861 static struct file_operations tracing_max_lat_fops = {
2862         .open           = tracing_open_generic,
2863         .read           = tracing_max_lat_read,
2864         .write          = tracing_max_lat_write,
2865 };
2866
2867 static struct file_operations tracing_ctrl_fops = {
2868         .open           = tracing_open_generic,
2869         .read           = tracing_ctrl_read,
2870         .write          = tracing_ctrl_write,
2871 };
2872
2873 static struct file_operations set_tracer_fops = {
2874         .open           = tracing_open_generic,
2875         .read           = tracing_set_trace_read,
2876         .write          = tracing_set_trace_write,
2877 };
2878
2879 static struct file_operations tracing_pipe_fops = {
2880         .open           = tracing_open_pipe,
2881         .poll           = tracing_poll_pipe,
2882         .read           = tracing_read_pipe,
2883         .release        = tracing_release_pipe,
2884 };
2885
2886 static struct file_operations tracing_entries_fops = {
2887         .open           = tracing_open_generic,
2888         .read           = tracing_entries_read,
2889         .write          = tracing_entries_write,
2890 };
2891
2892 static struct file_operations tracing_mark_fops = {
2893         .open           = tracing_open_generic,
2894         .write          = tracing_mark_write,
2895 };
2896
2897 #ifdef CONFIG_DYNAMIC_FTRACE
2898
2899 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
2900 {
2901         return 0;
2902 }
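/*
 * The weak stub above contributes nothing; an architecture can
 * override it to append arch-specific counters to the
 * dyn_ftrace_total_info output built below.
 */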
2903
2904 static ssize_t
2905 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2906                   size_t cnt, loff_t *ppos)
2907 {
2908         static char ftrace_dyn_info_buffer[1024];
2909         static DEFINE_MUTEX(dyn_info_mutex);
2910         unsigned long *p = filp->private_data;
2911         char *buf = ftrace_dyn_info_buffer;
2912         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2913         int r;
2914
2915         mutex_lock(&dyn_info_mutex);
2916         r = sprintf(buf, "%ld ", *p);
2917
2918         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
2919         buf[r++] = '\n';
2920
2921         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2922
2923         mutex_unlock(&dyn_info_mutex);
2924
2925         return r;
2926 }
2927
2928 static struct file_operations tracing_dyn_info_fops = {
2929         .open           = tracing_open_generic,
2930         .read           = tracing_read_dyn_info,
2931 };
2932 #endif
2933
2934 static struct dentry *d_tracer;
2935
2936 struct dentry *tracing_init_dentry(void)
2937 {
2938         static int once;
2939
2940         if (d_tracer)
2941                 return d_tracer;
2942
2943         d_tracer = debugfs_create_dir("tracing", NULL);
2944
2945         if (!d_tracer && !once) {
2946                 once = 1;
2947                 pr_warning("Could not create debugfs directory 'tracing'\n");
2948                 return NULL;
2949         }
2950
2951         return d_tracer;
2952 }
2953
2954 #ifdef CONFIG_FTRACE_SELFTEST
2955 /* Let selftest have access to static functions in this file */
2956 #include "trace_selftest.c"
2957 #endif
2958
2959 static __init int tracer_init_debugfs(void)
2960 {
2961         struct dentry *d_tracer;
2962         struct dentry *entry;
2963
2964         d_tracer = tracing_init_dentry();
2965
2966         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2967                                     &global_trace, &tracing_ctrl_fops);
2968         if (!entry)
2969                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2970
2971         entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2972                                     NULL, &tracing_iter_fops);
2973         if (!entry)
2974                 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2975
2976         entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2977                                     NULL, &tracing_cpumask_fops);
2978         if (!entry)
2979                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2980
2981         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2982                                     &global_trace, &tracing_lt_fops);
2983         if (!entry)
2984                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2985
2986         entry = debugfs_create_file("trace", 0444, d_tracer,
2987                                     &global_trace, &tracing_fops);
2988         if (!entry)
2989                 pr_warning("Could not create debugfs 'trace' entry\n");
2990
2991         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2992                                     &global_trace, &show_traces_fops);
2993         if (!entry)
2994                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2995
2996         entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2997                                     &global_trace, &set_tracer_fops);
2998         if (!entry)
2999                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
3000
3001         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3002                                     &tracing_max_latency,
3003                                     &tracing_max_lat_fops);
3004         if (!entry)
3005                 pr_warning("Could not create debugfs "
3006                            "'tracing_max_latency' entry\n");
3007
3008         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3009                                     &tracing_thresh, &tracing_max_lat_fops);
3010         if (!entry)
3011                 pr_warning("Could not create debugfs "
3012                            "'tracing_thresh' entry\n");
3013         entry = debugfs_create_file("README", 0644, d_tracer,
3014                                     NULL, &tracing_readme_fops);
3015         if (!entry)
3016                 pr_warning("Could not create debugfs 'README' entry\n");
3017
3018         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3019                                     NULL, &tracing_pipe_fops);
3020         if (!entry)
3021                 pr_warning("Could not create debugfs "
3022                            "'trace_pipe' entry\n");
3023
3024         entry = debugfs_create_file("trace_entries", 0644, d_tracer,
3025                                     &global_trace, &tracing_entries_fops);
3026         if (!entry)
3027                 pr_warning("Could not create debugfs "
3028                            "'trace_entries' entry\n");
3029
3030         entry = debugfs_create_file("trace_marker", 0220, d_tracer,
3031                                     NULL, &tracing_mark_fops);
3032         if (!entry)
3033                 pr_warning("Could not create debugfs "
3034                            "'trace_marker' entry\n");
3035
3036 #ifdef CONFIG_DYNAMIC_FTRACE
3037         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3038                                     &ftrace_update_tot_cnt,
3039                                     &tracing_dyn_info_fops);
3040         if (!entry)
3041                 pr_warning("Could not create debugfs "
3042                            "'dyn_ftrace_total_info' entry\n");
3043 #endif
3044 #ifdef CONFIG_SYSPROF_TRACER
3045         init_tracer_sysprof_debugfs(d_tracer);
3046 #endif
3047         return 0;
3048 }
3049
3050 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3051 {
3052         static DEFINE_SPINLOCK(trace_buf_lock);
3053         static char trace_buf[TRACE_BUF_SIZE];
3054
3055         struct ring_buffer_event *event;
3056         struct trace_array *tr = &global_trace;
3057         struct trace_array_cpu *data;
3058         struct print_entry *entry;
3059         unsigned long flags, irq_flags;
3060         int cpu, len = 0, size, pc;
3061
3062         if (!tr->ctrl || tracing_disabled)
3063                 return 0;
3064
3065         pc = preempt_count();
3066         preempt_disable_notrace();
3067         cpu = raw_smp_processor_id();
3068         data = tr->data[cpu];
3069
3070         if (unlikely(atomic_read(&data->disabled)))
3071                 goto out;
3072
3073         spin_lock_irqsave(&trace_buf_lock, flags);
3074         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
3075
3076         len = min(len, TRACE_BUF_SIZE-1);
3077         trace_buf[len] = 0;
3078
3079         size = sizeof(*entry) + len + 1;
3080         event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
3081         if (!event)
3082                 goto out_unlock;
3083         entry = ring_buffer_event_data(event);
3084         tracing_generic_entry_update(&entry->ent, flags, pc);
3085         entry->ent.type                 = TRACE_PRINT;
3086         entry->ip                       = ip;
3087
3088         memcpy(&entry->buf, trace_buf, len);
3089         entry->buf[len] = 0;
3090         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
3091
3092  out_unlock:
3093         spin_unlock_irqrestore(&trace_buf_lock, flags);
3094
3095  out:
3096         preempt_enable_notrace();
3097
3098         return len;
3099 }
3100 EXPORT_SYMBOL_GPL(trace_vprintk);
3101
3102 int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3103 {
3104         int ret;
3105         va_list ap;
3106
3107         if (!(trace_flags & TRACE_ITER_PRINTK))
3108                 return 0;
3109
3110         va_start(ap, fmt);
3111         ret = trace_vprintk(ip, fmt, ap);
3112         va_end(ap);
3113         return ret;
3114 }
3115 EXPORT_SYMBOL_GPL(__ftrace_printk);
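/*
 * In-kernel users normally go through the ftrace_printk() wrapper
 * (assumed here to come from <linux/ftrace.h>), which passes the
 * caller's instruction pointer as the first argument:
 *
 *	static void example_handler(int irq)
 *	{
 *		ftrace_printk("handling irq %d\n", irq);
 *	}
 *
 * The text lands in the ring buffer as a TRACE_PRINT entry and is
 * rendered by the printers above.
 */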
3116
3117 static int trace_panic_handler(struct notifier_block *this,
3118                                unsigned long event, void *unused)
3119 {
3120         if (ftrace_dump_on_oops)
3121                 ftrace_dump();
3122         return NOTIFY_OK;
3123 }
3124
3125 static struct notifier_block trace_panic_notifier = {
3126         .notifier_call  = trace_panic_handler,
3127         .next           = NULL,
3128         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
3129 };
3130
3131 static int trace_die_handler(struct notifier_block *self,
3132                              unsigned long val,
3133                              void *data)
3134 {
3135         switch (val) {
3136         case DIE_OOPS:
3137                 if (ftrace_dump_on_oops)
3138                         ftrace_dump();
3139                 break;
3140         default:
3141                 break;
3142         }
3143         return NOTIFY_OK;
3144 }
3145
3146 static struct notifier_block trace_die_notifier = {
3147         .notifier_call = trace_die_handler,
3148         .priority = 200
3149 };
3150
3151 /*
3152  * printk is limited to 1024 characters anyway; we really don't need
3153  * it that big, since nothing should be printing 1000 characters per line.
3154  */
3155 #define TRACE_MAX_PRINT         1000
3156
3157 /*
3158  * Define here KERN_TRACE so that we have one place to modify
3159  * it if we decide to change what log level the ftrace dump
3160  * should be at.
3161  */
3162 #define KERN_TRACE              KERN_INFO
3163
3164 static void
3165 trace_printk_seq(struct trace_seq *s)
3166 {
3167         /* Probably should print a warning here. */
3168         if (s->len >= TRACE_MAX_PRINT)
3169                 s->len = TRACE_MAX_PRINT;
3170
3171         /* should be zero-terminated, but we are paranoid. */
3172         s->buffer[s->len] = 0;
3173
3174         printk(KERN_TRACE "%s", s->buffer);
3175
3176         trace_seq_reset(s);
3177 }
3178
3179 void ftrace_dump(void)
3180 {
3181         static DEFINE_SPINLOCK(ftrace_dump_lock);
3182         /* use static because iter can be a bit big for the stack */
3183         static struct trace_iterator iter;
3184         static cpumask_t mask;
3185         static int dump_ran;
3186         unsigned long flags;
3187         int cnt = 0, cpu;
3188
3189         /* only one dump */
3190         spin_lock_irqsave(&ftrace_dump_lock, flags);
3191         if (dump_ran)
3192                 goto out;
3193
3194         dump_ran = 1;
3195
3196         /* No turning back! */
3197         ftrace_kill();
3198
3199         for_each_tracing_cpu(cpu) {
3200                 atomic_inc(&global_trace.data[cpu]->disabled);
3201         }
3202
3203         printk(KERN_TRACE "Dumping ftrace buffer:\n");
3204
3205         iter.tr = &global_trace;
3206         iter.trace = current_trace;
3207
3208         /*
3209          * We need to stop all tracing on all CPUS to read
3210          * the next buffer. This is a bit expensive, but is
3211          * not done often. We fill in all that we can read,
3212          * and then release the locks again.
3213          */
3214
3215         cpus_clear(mask);
3216
3217         while (!trace_empty(&iter)) {
3218
3219                 if (!cnt)
3220                         printk(KERN_TRACE "---------------------------------\n");
3221
3222                 cnt++;
3223
3224                 /* reset all but tr, trace, and overruns */
3225                 memset(&iter.seq, 0,
3226                        sizeof(struct trace_iterator) -
3227                        offsetof(struct trace_iterator, seq));
3228                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3229                 iter.pos = -1;
3230
3231                 if (find_next_entry_inc(&iter) != NULL) {
3232                         print_trace_line(&iter);
3233                         trace_consume(&iter);
3234                 }
3235
3236                 trace_printk_seq(&iter.seq);
3237         }
3238
3239         if (!cnt)
3240                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
3241         else
3242                 printk(KERN_TRACE "---------------------------------\n");
3243
3244  out:
3245         spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3246 }
3247
3248 __init static int tracer_alloc_buffers(void)
3249 {
3250         struct trace_array_cpu *data;
3251         int i;
3252
3253         /* TODO: make the number of buffers hot pluggable with CPUS */
3254         tracing_buffer_mask = cpu_possible_map;
3255
3256         global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3257                                                    TRACE_BUFFER_FLAGS);
3258         if (!global_trace.buffer) {
3259                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3260                 WARN_ON(1);
3261                 return 0;
3262         }
3263         global_trace.entries = ring_buffer_size(global_trace.buffer);
3264
3265 #ifdef CONFIG_TRACER_MAX_TRACE
3266         max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3267                                              TRACE_BUFFER_FLAGS);
3268         if (!max_tr.buffer) {
3269                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3270                 WARN_ON(1);
3271                 ring_buffer_free(global_trace.buffer);
3272                 return 0;
3273         }
3274         max_tr.entries = ring_buffer_size(max_tr.buffer);
3275         WARN_ON(max_tr.entries != global_trace.entries);
3276 #endif
3277
3278         /* Hook up the per-CPU data for both trace arrays */
3279         for_each_tracing_cpu(i) {
3280                 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3281                 max_tr.data[i] = &per_cpu(max_data, i);
3282         }
3283
3284         trace_init_cmdlines();
3285
3286         register_tracer(&nop_trace);
3287 #ifdef CONFIG_BOOT_TRACER
3288         register_tracer(&boot_tracer);
3289         current_trace = &boot_tracer;
3290         current_trace->init(&global_trace);
3291 #else
3292         current_trace = &nop_trace;
3293 #endif
3294
3295         /* All seems OK, enable tracing */
3296         global_trace.ctrl = tracer_enabled;
3297         tracing_disabled = 0;
3298
3299         atomic_notifier_chain_register(&panic_notifier_list,
3300                                        &trace_panic_notifier);
3301
3302         register_die_notifier(&trace_die_notifier);
3303
3304         return 0;
3305 }
3306 early_initcall(tracer_alloc_buffers);
3307 fs_initcall(tracer_init_debugfs);