trace: assign defaults at register_ftrace_event
[linux-2.6] / kernel / trace / trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>
#include <linux/ring_buffer.h>
#include <linux/irqflags.h>

#include "trace.h"
#include "trace_output.h"

#define TRACE_BUFFER_FLAGS      (RB_FL_OVERWRITE)

unsigned long __read_mostly     tracing_max_latency;
unsigned long __read_mostly     tracing_thresh;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as those from ftrace_printk,
 * could occur at the same time, giving false positive or negative
 * results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
static bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, and is set back to zero only if the
 * initialization of the tracer is successful; nothing else ever
 * clears it.
 */
int tracing_disabled = 1;

static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);

static inline void ftrace_disable_cpu(void)
{
        preempt_disable();
        local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}

static inline void ftrace_enable_cpu(void)
{
        local_dec(&__get_cpu_var(ftrace_cpu_disabled));
        preempt_enable();
}

static cpumask_var_t __read_mostly      tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)       \
        for_each_cpu(cpu, tracing_buffer_mask)

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops to true.
 */
int ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define BOOTUP_TRACER_SIZE              100
static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_ftrace(char *str)
{
        /* strlcpy guarantees NUL termination even if str is oversized */
        strlcpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        return 1;
}
__setup("ftrace=", set_ftrace);
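
/*
 * Illustrative usage (not part of this file): booting with, e.g.,
 *
 *      ftrace=function
 *
 * on the kernel command line makes register_tracer() start the
 * "function" tracer as soon as it is registered; see the
 * default_bootup_tracer handling in register_tracer() below.
 */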

static int __init set_ftrace_dump_on_oops(char *str)
{
        ftrace_dump_on_oops = 1;
        return 1;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
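
/*
 * Illustrative usage (not part of this file): add "ftrace_dump_on_oops"
 * to the kernel command line, or at run time:
 *
 *      echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */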

long
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
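
/*
 * Worked example: the +500 makes do_div() round to the nearest
 * microsecond instead of truncating, so ns2usecs(1499) == 1 while
 * ns2usecs(1500) == 2.
 */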

cycle_t ftrace_now(int cpu)
{
        u64 ts = ring_buffer_time_stamp(cpu);
        ring_buffer_normalize_time_stamp(cpu, &ts);
        return ts;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of the pages in memory are used to hold this
 * linked list, by chaining the lru items of the page descriptors
 * of each per-CPU buffer together.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int                      tracer_enabled = 1;

/**
 * tracing_is_enabled - return tracer_enabled status
 *
 * This function is used by other tracers to know the status
 * of the tracer_enabled flag.  Tracers may use this function
 * to know if they should enable their features when starting
 * up. See irqsoff tracer for an example (start_irqsoff_tracer).
 */
int tracing_is_enabled(void)
{
        return tracer_enabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case this is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer            *current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocating of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int                      max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
        /*
         * The runqueue_is_locked() can fail, but this is the best we
         * have for now:
         */
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
                wake_up(&trace_wait);
}

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;
        int ret;

        if (!str)
                return 0;
        ret = strict_strtoul(str, 0, &buf_size);
        /* nr_entries cannot be zero */
        if (ret < 0 || buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
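
/*
 * Illustrative usage (not part of this file): booting with, e.g.,
 *
 *      trace_buf_size=1048576
 *
 * requests a trace buffer of roughly 1MB, rounded to page size as
 * described above.
 */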

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "sched-tree",
        "ftrace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        NULL
};
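
/*
 * Illustrative usage (not part of this file): each string above can be
 * written to the debugfs trace_options file to set the matching bit in
 * trace_flags, or prefixed with "no" to clear it, e.g.
 *
 *      echo stacktrace > /debugfs/tracing/trace_options
 *      echo noprint-parent > /debugfs/tracing/trace_options
 */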

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = task_uid(tsk);
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}

static void
trace_seq_reset(struct trace_seq *s)
{
        s->len = 0;
        s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret)
                return -EFAULT;

        /* advance by what was actually copied, not by all that was pending */
        s->readpos += cnt;
        return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_reset(s);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf = tr->buffer;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;

        ftrace_disable_cpu();
        ring_buffer_reset(tr->buffer);
        ftrace_enable_cpu();

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}
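
/*
 * Sketch of typical use by a latency tracer (illustrative; modeled on
 * what e.g. the irqsoff tracer does when it detects a new maximum):
 *
 *      if (delta > tracing_max_latency) {
 *              tracing_max_latency = delta;
 *              update_max_tr(tr, current, cpu);
 *      }
 */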

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);

        ftrace_disable_cpu();

        ring_buffer_reset(max_tr.buffer);
        ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

        ftrace_enable_cpu();

        WARN_ON_ONCE(ret && ret != -EAGAIN);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        /*
         * When this gets called we hold the BKL which means that
         * preemption is disabled. Various trace selftests however
         * need to disable and enable preemption for successful tests.
         * So we drop the BKL here and grab it after the tests again.
         */
        unlock_kernel();
        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest && !tracing_selftest_disabled) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array *tr = &global_trace;
                int i;

                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_tracing_cpu(i)
                        tracing_reset(tr, i);

                current_trace = type;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_tracing_cpu(i)
                        tracing_reset(tr, i);

                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (!ret && default_bootup_tracer) {
                if (!strncmp(default_bootup_tracer, type->name,
                             BOOTUP_TRACER_SIZE)) {
                        printk(KERN_INFO "Starting tracer '%s'\n",
                               type->name);
                        /* this is the tracer requested on the command line */
                        tracing_set_tracer(type->name);
                        default_bootup_tracer = NULL;
                        /*
                         * Disable the remaining selftests, since running
                         * this tracer would break them.
                         */
                        tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
                        printk(KERN_INFO "Disabling FTRACE selftests due"
                               " to running tracer '%s'\n", type->name);
#endif
                }
        }

        lock_kernel();
        return ret;
}
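
/*
 * Minimal registration sketch (illustrative only; the tracer and its
 * callbacks are hypothetical, but the pattern matches how the simple
 * in-tree tracers register themselves):
 *
 *      static struct tracer example_tracer __read_mostly = {
 *              .name   = "example",
 *              .init   = example_tracer_init,
 *              .reset  = example_tracer_reset,
 *      };
 *
 *      static __init int init_example_tracer(void)
 *      {
 *              return register_tracer(&example_tracer);
 *      }
 *      device_initcall(init_example_tracer);
 */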

void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array *tr, int cpu)
{
        ftrace_disable_cpu();
        ring_buffer_reset_cpu(tr->buffer, cpu);
        ftrace_enable_cpu();
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
        tracing_disabled = 1;
        ftrace_stop();
        tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        if (tracing_disabled)
                return;

        spin_lock_irqsave(&tracing_start_lock, flags);
        if (--trace_stop_count) {
                if (trace_stop_count < 0) {
                        /* Someone screwed up their debugging */
                        WARN_ON_ONCE(1);
                        trace_stop_count = 0;
                }
                goto out;
        }

        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);

        ftrace_start();
 out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
        struct ring_buffer *buffer;
        unsigned long flags;

        ftrace_stop();
        spin_lock_irqsave(&tracing_start_lock, flags);
        if (trace_stop_count++)
                goto out;

        buffer = global_trace.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

        buffer = max_tr.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);

 out:
        spin_unlock_irqrestore(&tracing_start_lock, flags);
}
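
/*
 * Note that tracing_stop()/tracing_start() calls nest: trace_stop_count
 * acts as a depth counter, so recording is only re-enabled by the
 * outermost tracing_start().
 */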

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;
                /* keep the reverse map in sync so the slot can be reclaimed */
                map_cmdline_to_pid[idx] = tsk->pid;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
                             int pc)
{
        struct task_struct *tsk = current;

        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->tgid                     = (tsk) ? tsk->tgid : 0;
        entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
                TRACE_FLAG_IRQS_NOSUPPORT |
#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
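
/*
 * The fields filled in above feed the irqs-off, need-resched,
 * hardirq/softirq and preempt-depth columns of the latency-format
 * header printed by print_lat_help_header() below.
 */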

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long ip, unsigned long parent_ip, unsigned long flags,
               int pc)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long irq_flags;

        /* If we are reading the ring buffer, don't trace */
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_FN;
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
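
/*
 * The reserve/fill/commit sequence above is the canonical pattern for
 * writing a trace entry, and every event writer below follows it:
 *
 *      event = ring_buffer_lock_reserve(buf, sizeof(*entry), &irq_flags);
 *      if (!event)
 *              return;
 *      entry = ring_buffer_event_data(event);
 *      ... fill in *entry ...
 *      ring_buffer_unlock_commit(buf, event, irq_flags);
 */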

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void __trace_graph_entry(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct ring_buffer_event *event;
        struct ftrace_graph_ent_entry *entry;
        unsigned long irq_flags;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_GRAPH_ENT;
        entry->graph_ent                = *trace;
        ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}

static void __trace_graph_return(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *entry;
        unsigned long irq_flags;

        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;

        event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_GRAPH_RET;
        entry->ret                      = *trace;
        ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, data, ip, parent_ip, flags, pc);
}

static void __ftrace_trace_stack(struct trace_array *tr,
                                 struct trace_array_cpu *data,
                                 unsigned long flags,
                                 int skip, int pc)
{
#ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type         = TRACE_STACK;

        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = skip;
        trace.entries           = entry->caller;

        save_stack_trace(&trace);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

static void ftrace_trace_stack(struct trace_array *tr,
                               struct trace_array_cpu *data,
                               unsigned long flags,
                               int skip, int pc)
{
        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        __ftrace_trace_stack(tr, data, flags, skip, pc);
}

void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip, int pc)
{
        __ftrace_trace_stack(tr, data, flags, skip, pc);
}

static void ftrace_trace_userstack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags, int pc)
{
#ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
        struct stack_trace trace;
        unsigned long irq_flags;

        if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type         = TRACE_USER_STACK;

        memset(&entry->caller, 0, sizeof(entry->caller));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = 0;
        trace.entries           = entry->caller;

        save_stack_trace_user(&trace);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
#endif
}

void __trace_userstack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags)
{
        ftrace_trace_userstack(tr, data, flags, preempt_count());
}

static void
ftrace_trace_special(void *__tr, void *__data,
                     unsigned long arg1, unsigned long arg2, unsigned long arg3,
                     int pc)
{
        struct ring_buffer_event *event;
        struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct special_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, pc);
        entry->ent.type                 = TRACE_SPECIAL;
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, irq_flags, 4, pc);
        ftrace_trace_userstack(tr, data, irq_flags, pc);

        trace_wake_up();
}

void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
}

void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags, int pc)
{
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                           &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_CTX;
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
        entry->prev_state               = prev->state;
        entry->next_pid                 = next->pid;
        entry->next_prio                = next->prio;
        entry->next_state               = next->state;
        entry->next_cpu                 = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 5, pc);
        ftrace_trace_userstack(tr, data, flags, pc);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags, int pc)
{
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
        unsigned long irq_flags;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                           &irq_flags);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_WAKE;
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
        entry->prev_state               = curr->state;
        entry->next_pid                 = wakee->pid;
        entry->next_prio                = wakee->prio;
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
        ftrace_trace_stack(tr, data, flags, 6, pc);
        ftrace_trace_userstack(tr, data, flags, pc);

        trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (tracing_disabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];

        if (likely(atomic_inc_return(&data->disabled) == 1))
                ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
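
/*
 * Illustrative usage (not part of this file): ftrace_special() is meant
 * for ad-hoc debugging. Sprinkling calls such as
 *
 *      ftrace_special(current->pid, jiffies, 0);
 *
 * into code under investigation logs up to three arbitrary values into
 * the trace.
 */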

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        if (!ftrace_graph_addr(trace->func))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_entry(tr, data, trace, flags, pc);
        }
        /* Only do the atomic if it is not already set */
        if (!test_tsk_trace_graph(current))
                set_tsk_trace_graph(current);
        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return 1;
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, data, trace, flags, pc);
        }
        if (!trace->depth)
                clear_tsk_trace_graph(current);
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
};

static void trace_iterator_increment(struct trace_iterator *iter)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        iter->idx++;
        if (iter->buffer_iter[iter->cpu])
                ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);

        ftrace_enable_cpu();
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
{
        struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];

        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();

        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
        else
                event = ring_buffer_peek(iter->tr->buffer, cpu, ts);

        ftrace_enable_cpu();

        return event ? ring_buffer_event_data(event) : NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{
        struct ring_buffer *buffer = iter->tr->buffer;
        struct trace_entry *ent, *next = NULL;
        u64 next_ts = 0, ts;
        int next_cpu = -1;
        int cpu;

        for_each_tracing_cpu(cpu) {
                if (ring_buffer_empty_cpu(buffer, cpu))
                        continue;

                ent = peek_next_entry(iter, cpu, &ts);

                /*
                 * Pick the entry with the smallest timestamp:
                 */
                if (ent && (!next || ts < next_ts)) {
                        next = ent;
                        next_cpu = cpu;
                        next_ts = ts;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        if (ent_ts)
                *ent_ts = next_ts;

        return next;
}

/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
                                          int *ent_cpu, u64 *ent_ts)
{
        return __find_next_entry(iter, ent_cpu, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);

        if (iter->ent)
                trace_iterator_increment(iter);

        return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
        ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
        ftrace_enable_cpu();
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int cpu;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace) {
                mutex_unlock(&trace_types_lock);
                return NULL;
        }

        atomic_inc(&trace_record_cmdline_disabled);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                ftrace_disable_cpu();

                for_each_tracing_cpu(cpu) {
                        ring_buffer_iter_reset(iter->buffer_iter[cpu]);
                }

                ftrace_enable_cpu();

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        atomic_dec(&trace_record_cmdline_disabled);
        mutex_unlock(&trace_types_lock);
}

static void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                  _------=> CPU#            \n");
        seq_puts(m, "#                 / _-----=> irqs-off        \n");
        seq_puts(m, "#                | / _----=> need-resched    \n");
        seq_puts(m, "#                || / _---=> hardirq/softirq \n");
        seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#                |||| /                      \n");
        seq_puts(m, "#                |||||     delay             \n");
        seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total;
        unsigned long entries;
        const char *name = "preemption";

        if (type)
                name = type->name;

        entries = ring_buffer_entries(iter->tr->buffer);
        total = entries +
                ring_buffer_overruns(iter->tr->buffer);

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;

        if (!(trace_flags & TRACE_ITER_ANNOTATE))
                return;

        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                return;

        if (cpumask_test_cpu(iter->cpu, iter->started))
                return;

        cpumask_set_cpu(iter->cpu, iter->started);
        trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
}

static enum print_line_t print_lat_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_event *event;
        struct trace_entry *entry = iter->ent;

        test_cpu_buff_start(iter);

        event = ftrace_find_event(entry->type);

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                if (!trace_print_lat_context(iter))
                        goto partial;
        }

        if (event)
                return event->latency_trace(iter, sym_flags);

        if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
        struct trace_event *event;

        entry = iter->ent;

        test_cpu_buff_start(iter);

        event = ftrace_find_event(entry->type);

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                if (!trace_print_context(iter))
                        goto partial;
        }

        if (event)
                return event->trace(iter, sym_flags);

        if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
        struct trace_event *event;

        entry = iter->ent;

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                if (!trace_seq_printf(s, "%d %d %llu ",
                                      entry->pid, iter->cpu, iter->ts))
                        goto partial;
        }

        event = ftrace_find_event(entry->type);
        if (event)
                return event->raw(iter, 0);

        if (!trace_seq_printf(s, "%d ?\n", entry->type))
                goto partial;

        return TRACE_TYPE_HANDLED;
partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;
        struct trace_event *event;

        entry = iter->ent;

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
                SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
                SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
        }

        event = ftrace_find_event(entry->type);
        if (event) {
                enum print_line_t ret = event->hex(iter, 0);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        SEQ_PUT_FIELD_RET(s, newline);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_printf(s, "%s", field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
        struct trace_event *event;

        entry = iter->ent;

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                SEQ_PUT_FIELD_RET(s, entry->pid);
                SEQ_PUT_FIELD_RET(s, entry->cpu);
                SEQ_PUT_FIELD_RET(s, iter->ts);
        }

        event = ftrace_find_event(entry->type);
        return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}

static int trace_empty(struct trace_iterator *iter)
{
        int cpu;

        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu]) {
                        if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
                                return 0;
                } else {
                        if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
                                return 0;
                }
        }

        return 1;
}

static enum print_line_t print_trace_line(struct trace_iterator *iter)
{
        enum print_line_t ret;

        if (iter->trace && iter->trace->print_line) {
                ret = iter->trace->print_line(iter);
                if (ret != TRACE_TYPE_UNHANDLED)
                        return ret;
        }

        if (iter->ent->type == TRACE_PRINT &&
                        trace_flags & TRACE_ITER_PRINTK &&
                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
                return print_printk_msg_only(iter);

        if (trace_flags & TRACE_ITER_BIN)
                return print_bin_fmt(iter);

        if (trace_flags & TRACE_ITER_HEX)
                return print_hex_fmt(iter);

        if (trace_flags & TRACE_ITER_RAW)
                return print_raw_fmt(iter);

        if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                return print_lat_fmt(iter);

        return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
                if (iter->trace && iter->trace->print_header)
                        iter->trace->print_header(m);
                else if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                } else {
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                }
        } else {
                print_trace_line(iter);
                trace_print_seq(m, &iter->seq);
        }

        return 0;
}

static struct seq_operations tracer_seq_ops = {
        .start          = s_start,
        .next           = s_next,
        .stop           = s_stop,
        .show           = s_show,
};

1626 static struct trace_iterator *
1627 __tracing_open(struct inode *inode, struct file *file, int *ret)
1628 {
1629         struct trace_iterator *iter;
1630         struct seq_file *m;
1631         int cpu;
1632
1633         if (tracing_disabled) {
1634                 *ret = -ENODEV;
1635                 return NULL;
1636         }
1637
1638         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1639         if (!iter) {
1640                 *ret = -ENOMEM;
1641                 goto out;
1642         }
1643
1644         mutex_lock(&trace_types_lock);
1645         if (current_trace && current_trace->print_max)
1646                 iter->tr = &max_tr;
1647         else
1648                 iter->tr = inode->i_private;
1649         iter->trace = current_trace;
1650         iter->pos = -1;
1651
1652         /* Notify the tracer early, before we stop tracing. */
1653         if (iter->trace && iter->trace->open)
1654                 iter->trace->open(iter);
1655
1656         /* Annotate start of buffers if we had overruns */
1657         if (ring_buffer_overruns(iter->tr->buffer))
1658                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
1659
1660
1661         for_each_tracing_cpu(cpu) {
1662
1663                 iter->buffer_iter[cpu] =
1664                         ring_buffer_read_start(iter->tr->buffer, cpu);
1665
1666                 if (!iter->buffer_iter[cpu])
1667                         goto fail_buffer;
1668         }
1669
1670         /* TODO stop tracer */
1671         *ret = seq_open(file, &tracer_seq_ops);
1672         if (*ret)
1673                 goto fail_buffer;
1674
1675         m = file->private_data;
1676         m->private = iter;
1677
1678         /* stop the trace while dumping */
1679         tracing_stop();
1680
1681         mutex_unlock(&trace_types_lock);
1682
1683  out:
1684         return iter;
1685
 fail_buffer:
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
        }
        mutex_unlock(&trace_types_lock);
        kfree(iter);
        *ret = -ENOMEM;

        return ERR_PTR(-ENOMEM);
1695 }
1696
1697 int tracing_open_generic(struct inode *inode, struct file *filp)
1698 {
1699         if (tracing_disabled)
1700                 return -ENODEV;
1701
1702         filp->private_data = inode->i_private;
1703         return 0;
1704 }
1705
1706 int tracing_release(struct inode *inode, struct file *file)
1707 {
1708         struct seq_file *m = (struct seq_file *)file->private_data;
1709         struct trace_iterator *iter = m->private;
1710         int cpu;
1711
1712         mutex_lock(&trace_types_lock);
1713         for_each_tracing_cpu(cpu) {
1714                 if (iter->buffer_iter[cpu])
1715                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
1716         }
1717
1718         if (iter->trace && iter->trace->close)
1719                 iter->trace->close(iter);
1720
1721         /* reenable tracing if it was previously enabled */
1722         tracing_start();
1723         mutex_unlock(&trace_types_lock);
1724
1725         seq_release(inode, file);
1726         kfree(iter);
1727         return 0;
1728 }
1729
1730 static int tracing_open(struct inode *inode, struct file *file)
1731 {
1732         int ret;
1733
1734         __tracing_open(inode, file, &ret);
1735
1736         return ret;
1737 }
1738
1739 static int tracing_lt_open(struct inode *inode, struct file *file)
1740 {
1741         struct trace_iterator *iter;
1742         int ret;
1743
1744         iter = __tracing_open(inode, file, &ret);
1745
1746         if (!ret)
1747                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1748
1749         return ret;
1750 }
1751
1752
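/*
 * Seq_file iterator over the registered tracers (the trace_types
 * list), backing the 'available_tracers' file.  t_start() takes
 * trace_types_lock and t_stop() drops it, so the list cannot change
 * while it is being printed.
 */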
1753 static void *
1754 t_next(struct seq_file *m, void *v, loff_t *pos)
1755 {
1756         struct tracer *t = m->private;
1757
1758         (*pos)++;
1759
1760         if (t)
1761                 t = t->next;
1762
1763         m->private = t;
1764
1765         return t;
1766 }
1767
1768 static void *t_start(struct seq_file *m, loff_t *pos)
1769 {
1770         struct tracer *t = m->private;
1771         loff_t l = 0;
1772
1773         mutex_lock(&trace_types_lock);
1774         for (; t && l < *pos; t = t_next(m, t, &l))
1775                 ;
1776
1777         return t;
1778 }
1779
1780 static void t_stop(struct seq_file *m, void *p)
1781 {
1782         mutex_unlock(&trace_types_lock);
1783 }
1784
1785 static int t_show(struct seq_file *m, void *v)
1786 {
1787         struct tracer *t = v;
1788
1789         if (!t)
1790                 return 0;
1791
1792         seq_printf(m, "%s", t->name);
1793         if (t->next)
1794                 seq_putc(m, ' ');
1795         else
1796                 seq_putc(m, '\n');
1797
1798         return 0;
1799 }
1800
1801 static struct seq_operations show_traces_seq_ops = {
1802         .start          = t_start,
1803         .next           = t_next,
1804         .stop           = t_stop,
1805         .show           = t_show,
1806 };
1807
1808 static int show_traces_open(struct inode *inode, struct file *file)
1809 {
1810         int ret;
1811
1812         if (tracing_disabled)
1813                 return -ENODEV;
1814
1815         ret = seq_open(file, &show_traces_seq_ops);
1816         if (!ret) {
1817                 struct seq_file *m = file->private_data;
1818                 m->private = trace_types;
1819         }
1820
1821         return ret;
1822 }
1823
1824 static struct file_operations tracing_fops = {
1825         .open           = tracing_open,
1826         .read           = seq_read,
1827         .llseek         = seq_lseek,
1828         .release        = tracing_release,
1829 };
1830
1831 static struct file_operations tracing_lt_fops = {
1832         .open           = tracing_lt_open,
1833         .read           = seq_read,
1834         .llseek         = seq_lseek,
1835         .release        = tracing_release,
1836 };
1837
1838 static struct file_operations show_traces_fops = {
1839         .open           = show_traces_open,
1840         .read           = seq_read,
1841         .release        = seq_release,
1842 };
1843
1844 /*
1845  * Only trace on a CPU if the bitmask is set:
1846  */
1847 static cpumask_var_t tracing_cpumask;
1848
1849 /*
1850  * The tracer itself will not take this lock, but still we want
1851  * to provide a consistent cpumask to user-space:
1852  */
1853 static DEFINE_MUTEX(tracing_cpumask_update_lock);
1854
1855 /*
1856  * Temporary storage for the character representation of the
1857  * CPU bitmask (and one more byte for the newline):
1858  */
1859 static char mask_str[NR_CPUS + 1];
1860
1861 static ssize_t
1862 tracing_cpumask_read(struct file *filp, char __user *ubuf,
1863                      size_t count, loff_t *ppos)
1864 {
1865         int len;
1866
1867         mutex_lock(&tracing_cpumask_update_lock);
1868
1869         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
1870         if (count - len < 2) {
1871                 count = -EINVAL;
1872                 goto out_err;
1873         }
1874         len += sprintf(mask_str + len, "\n");
1875         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
1876
1877 out_err:
1878         mutex_unlock(&tracing_cpumask_update_lock);
1879
1880         return count;
1881 }
1882
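/*
 * tracing_cpumask_write - update the set of CPUs being traced.
 * While bits are flipped we bump the per-CPU 'disabled' counters
 * under ftrace_max_lock, so a CPU that is being masked out stops
 * recording before the new mask is published.
 */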
1883 static ssize_t
1884 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
1885                       size_t count, loff_t *ppos)
1886 {
1887         int err, cpu;
1888         cpumask_var_t tracing_cpumask_new;
1889
1890         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
1891                 return -ENOMEM;
1892
1893         mutex_lock(&tracing_cpumask_update_lock);
1894         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
1895         if (err)
1896                 goto err_unlock;
1897
1898         local_irq_disable();
1899         __raw_spin_lock(&ftrace_max_lock);
1900         for_each_tracing_cpu(cpu) {
1901                 /*
1902                  * Increase/decrease the disabled counter if we are
1903                  * about to flip a bit in the cpumask:
1904                  */
1905                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
1906                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
1907                         atomic_inc(&global_trace.data[cpu]->disabled);
1908                 }
1909                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
1910                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
1911                         atomic_dec(&global_trace.data[cpu]->disabled);
1912                 }
1913         }
1914         __raw_spin_unlock(&ftrace_max_lock);
1915         local_irq_enable();
1916
1917         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
1918
1919         mutex_unlock(&tracing_cpumask_update_lock);
1920         free_cpumask_var(tracing_cpumask_new);
1921
1922         return count;
1923
1924 err_unlock:
1925         mutex_unlock(&tracing_cpumask_update_lock);
1926         free_cpumask_var(tracing_cpumask_new);
1927
1928         return err;
1929 }
1930
1931 static struct file_operations tracing_cpumask_fops = {
1932         .open           = tracing_open_generic,
1933         .read           = tracing_cpumask_read,
1934         .write          = tracing_cpumask_write,
1935 };
1936
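/*
 * tracing_trace_options_read - list the generic trace options and the
 * current tracer's private options as "name"/"noname" pairs, sized
 * and formatted into a temporary buffer for user-space.
 */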
1937 static ssize_t
1938 tracing_trace_options_read(struct file *filp, char __user *ubuf,
1939                        size_t cnt, loff_t *ppos)
1940 {
1941         int i;
1942         char *buf;
1943         int r = 0;
1944         int len = 0;
1945         u32 tracer_flags = current_trace->flags->val;
1946         struct tracer_opt *trace_opts = current_trace->flags->opts;
1947
1948
1949         /* calculate max size */
1950         for (i = 0; trace_options[i]; i++) {
1951                 len += strlen(trace_options[i]);
1952                 len += 3; /* "no" and space */
1953         }
1954
1955         /*
1956          * Increase the size with the names of options
1957          * specific to the current tracer.
1958          */
1959         for (i = 0; trace_opts[i].name; i++) {
1960                 len += strlen(trace_opts[i].name);
1961                 len += 3; /* "no" and space */
1962         }
1963
1964         /* +2 for \n and \0 */
1965         buf = kmalloc(len + 2, GFP_KERNEL);
1966         if (!buf)
1967                 return -ENOMEM;
1968
1969         for (i = 0; trace_options[i]; i++) {
1970                 if (trace_flags & (1 << i))
1971                         r += sprintf(buf + r, "%s ", trace_options[i]);
1972                 else
1973                         r += sprintf(buf + r, "no%s ", trace_options[i]);
1974         }
1975
1976         for (i = 0; trace_opts[i].name; i++) {
1977                 if (tracer_flags & trace_opts[i].bit)
1978                         r += sprintf(buf + r, "%s ",
1979                                 trace_opts[i].name);
1980                 else
1981                         r += sprintf(buf + r, "no%s ",
1982                                 trace_opts[i].name);
1983         }
1984
1985         r += sprintf(buf + r, "\n");
1986         WARN_ON(r >= len + 2);
1987
1988         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1989
1990         kfree(buf);
1991
1992         return r;
1993 }
1994
1995 /* Try to assign a tracer specific option */
1996 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
1997 {
1998         struct tracer_flags *trace_flags = trace->flags;
1999         struct tracer_opt *opts = NULL;
2000         int ret = 0, i = 0;
2001         int len;
2002
2003         for (i = 0; trace_flags->opts[i].name; i++) {
2004                 opts = &trace_flags->opts[i];
2005                 len = strlen(opts->name);
2006
2007                 if (strncmp(cmp, opts->name, len) == 0) {
2008                         ret = trace->set_flag(trace_flags->val,
2009                                 opts->bit, !neg);
2010                         break;
2011                 }
2012         }
2013         /* Not found */
2014         if (!trace_flags->opts[i].name)
2015                 return -EINVAL;
2016
2017         /* Refused to handle */
2018         if (ret)
2019                 return ret;
2020
2021         if (neg)
2022                 trace_flags->val &= ~opts->bit;
2023         else
2024                 trace_flags->val |= opts->bit;
2025
2026         return 0;
2027 }
2028
2029 static ssize_t
2030 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2031                         size_t cnt, loff_t *ppos)
2032 {
2033         char buf[64];
2034         char *cmp = buf;
2035         int neg = 0;
2036         int ret;
2037         int i;
2038
2039         if (cnt >= sizeof(buf))
2040                 return -EINVAL;
2041
2042         if (copy_from_user(&buf, ubuf, cnt))
2043                 return -EFAULT;
2044
2045         buf[cnt] = 0;
2046
2047         if (strncmp(buf, "no", 2) == 0) {
2048                 neg = 1;
2049                 cmp += 2;
2050         }
2051
2052         for (i = 0; trace_options[i]; i++) {
2053                 int len = strlen(trace_options[i]);
2054
2055                 if (strncmp(cmp, trace_options[i], len) == 0) {
2056                         if (neg)
2057                                 trace_flags &= ~(1 << i);
2058                         else
2059                                 trace_flags |= (1 << i);
2060                         break;
2061                 }
2062         }
2063
2064         /* If no option could be set, test the specific tracer options */
2065         if (!trace_options[i]) {
2066                 ret = set_tracer_option(current_trace, cmp, neg);
2067                 if (ret)
2068                         return ret;
2069         }
2070
2071         filp->f_pos += cnt;
2072
2073         return cnt;
2074 }
2075
2076 static struct file_operations tracing_iter_fops = {
2077         .open           = tracing_open_generic,
2078         .read           = tracing_trace_options_read,
2079         .write          = tracing_trace_options_write,
2080 };
2081
2082 static const char readme_msg[] =
2083         "tracing mini-HOWTO:\n\n"
2084         "# mkdir /debug\n"
2085         "# mount -t debugfs nodev /debug\n\n"
2086         "# cat /debug/tracing/available_tracers\n"
2087         "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2088         "# cat /debug/tracing/current_tracer\n"
2089         "none\n"
2090         "# echo sched_switch > /debug/tracing/current_tracer\n"
2091         "# cat /debug/tracing/current_tracer\n"
2092         "sched_switch\n"
2093         "# cat /debug/tracing/trace_options\n"
2094         "noprint-parent nosym-offset nosym-addr noverbose\n"
2095         "# echo print-parent > /debug/tracing/trace_options\n"
2096         "# echo 1 > /debug/tracing/tracing_enabled\n"
2097         "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2098         "# echo 0 > /debug/tracing/tracing_enabled\n"
2099 ;
2100
2101 static ssize_t
2102 tracing_readme_read(struct file *filp, char __user *ubuf,
2103                        size_t cnt, loff_t *ppos)
2104 {
2105         return simple_read_from_buffer(ubuf, cnt, ppos,
2106                                         readme_msg, strlen(readme_msg));
2107 }
2108
2109 static struct file_operations tracing_readme_fops = {
2110         .open           = tracing_open_generic,
2111         .read           = tracing_readme_read,
2112 };
2113
2114 static ssize_t
2115 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2116                   size_t cnt, loff_t *ppos)
2117 {
2118         char buf[64];
2119         int r;
2120
2121         r = sprintf(buf, "%u\n", tracer_enabled);
2122         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2123 }
2124
2125 static ssize_t
2126 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2127                    size_t cnt, loff_t *ppos)
2128 {
2129         struct trace_array *tr = filp->private_data;
2130         char buf[64];
2131         unsigned long val;
2132         int ret;
2133
2134         if (cnt >= sizeof(buf))
2135                 return -EINVAL;
2136
2137         if (copy_from_user(&buf, ubuf, cnt))
2138                 return -EFAULT;
2139
2140         buf[cnt] = 0;
2141
2142         ret = strict_strtoul(buf, 10, &val);
2143         if (ret < 0)
2144                 return ret;
2145
2146         val = !!val;
2147
2148         mutex_lock(&trace_types_lock);
2149         if (tracer_enabled ^ val) {
2150                 if (val) {
2151                         tracer_enabled = 1;
2152                         if (current_trace->start)
2153                                 current_trace->start(tr);
2154                         tracing_start();
2155                 } else {
2156                         tracer_enabled = 0;
2157                         tracing_stop();
2158                         if (current_trace->stop)
2159                                 current_trace->stop(tr);
2160                 }
2161         }
2162         mutex_unlock(&trace_types_lock);
2163
2164         filp->f_pos += cnt;
2165
2166         return cnt;
2167 }
2168
2169 static ssize_t
2170 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2171                        size_t cnt, loff_t *ppos)
2172 {
2173         char buf[max_tracer_type_len+2];
2174         int r;
2175
2176         mutex_lock(&trace_types_lock);
2177         if (current_trace)
2178                 r = sprintf(buf, "%s\n", current_trace->name);
2179         else
2180                 r = sprintf(buf, "\n");
2181         mutex_unlock(&trace_types_lock);
2182
2183         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2184 }
2185
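/*
 * tracing_set_tracer - switch the current tracer by name.  Resets the
 * old tracer, installs the new one and runs its init() callback under
 * trace_types_lock; branch tracing is disabled around the switch so
 * the switch itself is not traced.
 */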
2186 static int tracing_set_tracer(const char *buf)
2187 {
2188         struct trace_array *tr = &global_trace;
2189         struct tracer *t;
2190         int ret = 0;
2191
2192         mutex_lock(&trace_types_lock);
2193         for (t = trace_types; t; t = t->next) {
2194                 if (strcmp(t->name, buf) == 0)
2195                         break;
2196         }
2197         if (!t) {
2198                 ret = -EINVAL;
2199                 goto out;
2200         }
2201         if (t == current_trace)
2202                 goto out;
2203
2204         trace_branch_disable();
2205         if (current_trace && current_trace->reset)
2206                 current_trace->reset(tr);
2207
2208         current_trace = t;
2209         if (t->init) {
2210                 ret = t->init(tr);
2211                 if (ret)
2212                         goto out;
2213         }
2214
2215         trace_branch_enable(tr);
2216  out:
2217         mutex_unlock(&trace_types_lock);
2218
2219         return ret;
2220 }
2221
2222 static ssize_t
2223 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2224                         size_t cnt, loff_t *ppos)
2225 {
2226         char buf[max_tracer_type_len+1];
2227         int i;
2228         size_t ret;
2229         int err;
2230
2231         ret = cnt;
2232
2233         if (cnt > max_tracer_type_len)
2234                 cnt = max_tracer_type_len;
2235
2236         if (copy_from_user(&buf, ubuf, cnt))
2237                 return -EFAULT;
2238
2239         buf[cnt] = 0;
2240
2241         /* strip ending whitespace. */
2242         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2243                 buf[i] = 0;
2244
2245         err = tracing_set_tracer(buf);
2246         if (err)
2247                 return err;
2248
2249         filp->f_pos += ret;
2250
2251         return ret;
2252 }
2253
2254 static ssize_t
2255 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2256                      size_t cnt, loff_t *ppos)
2257 {
2258         unsigned long *ptr = filp->private_data;
2259         char buf[64];
2260         int r;
2261
2262         r = snprintf(buf, sizeof(buf), "%ld\n",
2263                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2264         if (r > sizeof(buf))
2265                 r = sizeof(buf);
2266         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2267 }
2268
2269 static ssize_t
2270 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2271                       size_t cnt, loff_t *ppos)
2272 {
2273         unsigned long *ptr = filp->private_data;
2274         char buf[64];
2275         unsigned long val;
2276         int ret;
2277
2278         if (cnt >= sizeof(buf))
2279                 return -EINVAL;
2280
2281         if (copy_from_user(&buf, ubuf, cnt))
2282                 return -EFAULT;
2283
2284         buf[cnt] = 0;
2285
2286         ret = strict_strtoul(buf, 10, &val);
2287         if (ret < 0)
2288                 return ret;
2289
2290         *ptr = val * 1000;
2291
2292         return cnt;
2293 }
2294
2295 static atomic_t tracing_reader;
2296
2297 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2298 {
2299         struct trace_iterator *iter;
2300
2301         if (tracing_disabled)
2302                 return -ENODEV;
2303
2304         /* We allow only one reader of the pipe */
2305         if (atomic_inc_return(&tracing_reader) != 1) {
2306                 atomic_dec(&tracing_reader);
2307                 return -EBUSY;
2308         }
2309
        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                atomic_dec(&tracing_reader);
                return -ENOMEM;
        }

        if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                kfree(iter);
                atomic_dec(&tracing_reader);
                return -ENOMEM;
        }
2319
2320         mutex_lock(&trace_types_lock);
2321
2322         /* trace pipe does not show start of buffer */
2323         cpumask_setall(iter->started);
2324
2325         iter->tr = &global_trace;
2326         iter->trace = current_trace;
2327         filp->private_data = iter;
2328
2329         if (iter->trace->pipe_open)
2330                 iter->trace->pipe_open(iter);
2331         mutex_unlock(&trace_types_lock);
2332
2333         return 0;
2334 }
2335
2336 static int tracing_release_pipe(struct inode *inode, struct file *file)
2337 {
2338         struct trace_iterator *iter = file->private_data;
2339
2340         free_cpumask_var(iter->started);
2341         kfree(iter);
2342         atomic_dec(&tracing_reader);
2343
2344         return 0;
2345 }
2346
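/*
 * tracing_poll_pipe - poll support for trace_pipe.  In blocking mode
 * we always report readable; otherwise we report readable only while
 * the buffers actually contain entries, using trace_wait for the
 * wakeup.
 */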
2347 static unsigned int
2348 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2349 {
2350         struct trace_iterator *iter = filp->private_data;
2351
2352         if (trace_flags & TRACE_ITER_BLOCK) {
2353                 /*
2354                  * Always select as readable when in blocking mode
2355                  */
2356                 return POLLIN | POLLRDNORM;
2357         } else {
2358                 if (!trace_empty(iter))
2359                         return POLLIN | POLLRDNORM;
2360                 poll_wait(filp, &trace_wait, poll_table);
2361                 if (!trace_empty(iter))
2362                         return POLLIN | POLLRDNORM;
2363
2364                 return 0;
2365         }
2366 }
2367
2368 /*
2369  * Consumer reader.
2370  */
2371 static ssize_t
2372 tracing_read_pipe(struct file *filp, char __user *ubuf,
2373                   size_t cnt, loff_t *ppos)
2374 {
2375         struct trace_iterator *iter = filp->private_data;
2376         ssize_t sret;
2377
2378         /* return any leftover data */
2379         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2380         if (sret != -EBUSY)
2381                 return sret;
2382
2383         trace_seq_reset(&iter->seq);
2384
2385         mutex_lock(&trace_types_lock);
2386         if (iter->trace->read) {
2387                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2388                 if (sret)
2389                         goto out;
2390         }
2391
2392 waitagain:
2393         sret = 0;
2394         while (trace_empty(iter)) {
2395
2396                 if ((filp->f_flags & O_NONBLOCK)) {
2397                         sret = -EAGAIN;
2398                         goto out;
2399                 }
2400
2401                 /*
2402                  * This is a makeshift waitqueue. The reasons we don't use
2403                  * an actual wait queue are:
2404                  *  1) we only ever have one waiter
2405                  *  2) tracing traces all functions; we don't want
2406                  *     the overhead of calling wake_up and friends
2407                  *     (and of tracing them too)
2408                  * Anyway, this is a very primitive wakeup.
2409                  */
2410                 set_current_state(TASK_INTERRUPTIBLE);
2411                 iter->tr->waiter = current;
2412
2413                 mutex_unlock(&trace_types_lock);
2414
2415                 /* sleep for 100 msecs, and try again. */
2416                 schedule_timeout(HZ/10);
2417
2418                 mutex_lock(&trace_types_lock);
2419
2420                 iter->tr->waiter = NULL;
2421
2422                 if (signal_pending(current)) {
2423                         sret = -EINTR;
2424                         goto out;
2425                 }
2426
2427                 if (iter->trace != current_trace)
2428                         goto out;
2429
2430                 /*
2431                  * We block until we read something and tracing is disabled.
2432                  * We still block if tracing is disabled but we have never
2433                  * read anything; this allows a user to cat this file and
2434                  * then enable tracing. Once we have read something, we
2435                  * return EOF the next time tracing is disabled.
2436                  *
2437                  * iter->pos will be 0 if we haven't read anything.
2438                  */
2439                 if (!tracer_enabled && iter->pos)
2440                         break;
2441
2442                 continue;
2443         }
2444
2445         /* stop when tracing is finished */
2446         if (trace_empty(iter))
2447                 goto out;
2448
2449         if (cnt >= PAGE_SIZE)
2450                 cnt = PAGE_SIZE - 1;
2451
2452         /* reset all but tr, trace, and overruns */
2453         memset(&iter->seq, 0,
2454                sizeof(struct trace_iterator) -
2455                offsetof(struct trace_iterator, seq));
2456         iter->pos = -1;
2457
2458         while (find_next_entry_inc(iter) != NULL) {
2459                 enum print_line_t ret;
2460                 int len = iter->seq.len;
2461
2462                 ret = print_trace_line(iter);
2463                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2464                         /* don't print partial lines */
2465                         iter->seq.len = len;
2466                         break;
2467                 }
2468
2469                 trace_consume(iter);
2470
2471                 if (iter->seq.len >= cnt)
2472                         break;
2473         }
2474
2475         /* Now copy what we have to the user */
2476         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2477         if (iter->seq.readpos >= iter->seq.len)
2478                 trace_seq_reset(&iter->seq);
2479
2480         /*
2481          * If there was nothing to send to the user, in spite of
2482          * consuming trace entries, go back to wait for more entries.
2483          */
2484         if (sret == -EBUSY)
2485                 goto waitagain;
2486
2487 out:
2488         mutex_unlock(&trace_types_lock);
2489
2490         return sret;
2491 }
2492
2493 static ssize_t
2494 tracing_entries_read(struct file *filp, char __user *ubuf,
2495                      size_t cnt, loff_t *ppos)
2496 {
2497         struct trace_array *tr = filp->private_data;
2498         char buf[64];
2499         int r;
2500
2501         r = sprintf(buf, "%lu\n", tr->entries >> 10);
2502         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2503 }
2504
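/*
 * tracing_entries_write - resize the ring buffers; the value written
 * is in KB.  Tracing is stopped and every CPU buffer disabled during
 * the resize.  If resizing max_tr fails we try to restore the global
 * buffer to its old size, and disable tracing for good if even that
 * fails.
 */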
2505 static ssize_t
2506 tracing_entries_write(struct file *filp, const char __user *ubuf,
2507                       size_t cnt, loff_t *ppos)
2508 {
2509         unsigned long val;
2510         char buf[64];
2511         int ret, cpu;
2512
2513         if (cnt >= sizeof(buf))
2514                 return -EINVAL;
2515
2516         if (copy_from_user(&buf, ubuf, cnt))
2517                 return -EFAULT;
2518
2519         buf[cnt] = 0;
2520
2521         ret = strict_strtoul(buf, 10, &val);
2522         if (ret < 0)
2523                 return ret;
2524
2525         /* must have at least 1 entry */
2526         if (!val)
2527                 return -EINVAL;
2528
2529         mutex_lock(&trace_types_lock);
2530
2531         tracing_stop();
2532
2533         /* disable all cpu buffers */
2534         for_each_tracing_cpu(cpu) {
2535                 if (global_trace.data[cpu])
2536                         atomic_inc(&global_trace.data[cpu]->disabled);
2537                 if (max_tr.data[cpu])
2538                         atomic_inc(&max_tr.data[cpu]->disabled);
2539         }
2540
2541         /* value is in KB */
2542         val <<= 10;
2543
2544         if (val != global_trace.entries) {
2545                 ret = ring_buffer_resize(global_trace.buffer, val);
2546                 if (ret < 0) {
2547                         cnt = ret;
2548                         goto out;
2549                 }
2550
2551                 ret = ring_buffer_resize(max_tr.buffer, val);
2552                 if (ret < 0) {
2553                         int r;
2554                         cnt = ret;
2555                         r = ring_buffer_resize(global_trace.buffer,
2556                                                global_trace.entries);
2557                         if (r < 0) {
2558                         /* AARGH! We are left with a differently
2559                          * sized max buffer!!!! */
2560                                 WARN_ON(1);
2561                                 tracing_disabled = 1;
2562                         }
2563                         goto out;
2564                 }
2565
2566                 global_trace.entries = val;
2567         }
2568
2569         filp->f_pos += cnt;
2570
2571         /* If the resize left tracing disabled, return ENOMEM */
2572         if (tracing_disabled)
2573                 cnt = -ENOMEM;
2574  out:
2575         for_each_tracing_cpu(cpu) {
2576                 if (global_trace.data[cpu])
2577                         atomic_dec(&global_trace.data[cpu]->disabled);
2578                 if (max_tr.data[cpu])
2579                         atomic_dec(&max_tr.data[cpu]->disabled);
2580         }
2581
2582         tracing_start();
2583         max_tr.entries = global_trace.entries;
2584         mutex_unlock(&trace_types_lock);
2585
2586         return cnt;
2587 }
2588
2589 static int mark_printk(const char *fmt, ...)
2590 {
2591         int ret;
2592         va_list args;
2593         va_start(args, fmt);
2594         ret = trace_vprintk(0, -1, fmt, args);
2595         va_end(args);
2596         return ret;
2597 }
2598
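/*
 * tracing_mark_write - the 'trace_marker' file: copy a user-supplied
 * string, cut it at the first newline and inject it into the ring
 * buffer through mark_printk().
 */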
2599 static ssize_t
2600 tracing_mark_write(struct file *filp, const char __user *ubuf,
2601                                         size_t cnt, loff_t *fpos)
2602 {
2603         char *buf;
2604         char *end;
2605
2606         if (tracing_disabled)
2607                 return -EINVAL;
2608
2609         if (cnt > TRACE_BUF_SIZE)
2610                 cnt = TRACE_BUF_SIZE;
2611
2612         buf = kmalloc(cnt + 1, GFP_KERNEL);
2613         if (buf == NULL)
2614                 return -ENOMEM;
2615
2616         if (copy_from_user(buf, ubuf, cnt)) {
2617                 kfree(buf);
2618                 return -EFAULT;
2619         }
2620
2621         /* Cut at the first NUL or newline. */
2622         buf[cnt] = '\0';
2623         end = strchr(buf, '\n');
2624         if (end)
2625                 *end = '\0';
2626
2627         cnt = mark_printk("%s\n", buf);
2628         kfree(buf);
2629         *fpos += cnt;
2630
2631         return cnt;
2632 }
2633
2634 static struct file_operations tracing_max_lat_fops = {
2635         .open           = tracing_open_generic,
2636         .read           = tracing_max_lat_read,
2637         .write          = tracing_max_lat_write,
2638 };
2639
2640 static struct file_operations tracing_ctrl_fops = {
2641         .open           = tracing_open_generic,
2642         .read           = tracing_ctrl_read,
2643         .write          = tracing_ctrl_write,
2644 };
2645
2646 static struct file_operations set_tracer_fops = {
2647         .open           = tracing_open_generic,
2648         .read           = tracing_set_trace_read,
2649         .write          = tracing_set_trace_write,
2650 };
2651
2652 static struct file_operations tracing_pipe_fops = {
2653         .open           = tracing_open_pipe,
2654         .poll           = tracing_poll_pipe,
2655         .read           = tracing_read_pipe,
2656         .release        = tracing_release_pipe,
2657 };
2658
2659 static struct file_operations tracing_entries_fops = {
2660         .open           = tracing_open_generic,
2661         .read           = tracing_entries_read,
2662         .write          = tracing_entries_write,
2663 };
2664
2665 static struct file_operations tracing_mark_fops = {
2666         .open           = tracing_open_generic,
2667         .write          = tracing_mark_write,
2668 };
2669
2670 #ifdef CONFIG_DYNAMIC_FTRACE
2671
2672 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
2673 {
2674         return 0;
2675 }
2676
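/*
 * tracing_read_dyn_info - report the counter handed in through
 * i_private (ftrace_update_tot_cnt for 'dyn_ftrace_total_info'),
 * followed by whatever the weak ftrace_arch_read_dyn_info() hook
 * appends for the architecture.
 */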
2677 static ssize_t
2678 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
2679                   size_t cnt, loff_t *ppos)
2680 {
2681         static char ftrace_dyn_info_buffer[1024];
2682         static DEFINE_MUTEX(dyn_info_mutex);
2683         unsigned long *p = filp->private_data;
2684         char *buf = ftrace_dyn_info_buffer;
2685         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
2686         int r;
2687
2688         mutex_lock(&dyn_info_mutex);
2689         r = sprintf(buf, "%ld ", *p);
2690
2691         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
2692         buf[r++] = '\n';
2693
2694         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2695
2696         mutex_unlock(&dyn_info_mutex);
2697
2698         return r;
2699 }
2700
2701 static struct file_operations tracing_dyn_info_fops = {
2702         .open           = tracing_open_generic,
2703         .read           = tracing_read_dyn_info,
2704 };
2705 #endif
2706
2707 static struct dentry *d_tracer;
2708
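/*
 * tracing_init_dentry - return the debugfs 'tracing' directory,
 * creating it on first use.  Warns once and returns NULL if the
 * directory cannot be created.
 */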
2709 struct dentry *tracing_init_dentry(void)
2710 {
2711         static int once;
2712
2713         if (d_tracer)
2714                 return d_tracer;
2715
2716         d_tracer = debugfs_create_dir("tracing", NULL);
2717
2718         if (!d_tracer && !once) {
2719                 once = 1;
2720                 pr_warning("Could not create debugfs directory 'tracing'\n");
2721                 return NULL;
2722         }
2723
2724         return d_tracer;
2725 }
2726
2727 #ifdef CONFIG_FTRACE_SELFTEST
2728 /* Let selftest have access to static functions in this file */
2729 #include "trace_selftest.c"
2730 #endif
2731
2732 static __init int tracer_init_debugfs(void)
2733 {
2734         struct dentry *d_tracer;
2735         struct dentry *entry;
2736
2737         d_tracer = tracing_init_dentry();
2738
2739         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2740                                     &global_trace, &tracing_ctrl_fops);
2741         if (!entry)
2742                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2743
2744         entry = debugfs_create_file("trace_options", 0644, d_tracer,
2745                                     NULL, &tracing_iter_fops);
2746         if (!entry)
2747                 pr_warning("Could not create debugfs 'trace_options' entry\n");
2748
2749         entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2750                                     NULL, &tracing_cpumask_fops);
2751         if (!entry)
2752                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2753
2754         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2755                                     &global_trace, &tracing_lt_fops);
2756         if (!entry)
2757                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2758
2759         entry = debugfs_create_file("trace", 0444, d_tracer,
2760                                     &global_trace, &tracing_fops);
2761         if (!entry)
2762                 pr_warning("Could not create debugfs 'trace' entry\n");
2763
2764         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2765                                     &global_trace, &show_traces_fops);
2766         if (!entry)
2767                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2768
2769         entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2770                                     &global_trace, &set_tracer_fops);
2771         if (!entry)
2772                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2773
2774         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2775                                     &tracing_max_latency,
2776                                     &tracing_max_lat_fops);
2777         if (!entry)
2778                 pr_warning("Could not create debugfs "
2779                            "'tracing_max_latency' entry\n");
2780
2781         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2782                                     &tracing_thresh, &tracing_max_lat_fops);
2783         if (!entry)
2784                 pr_warning("Could not create debugfs "
2785                            "'tracing_thresh' entry\n");
2786         entry = debugfs_create_file("README", 0644, d_tracer,
2787                                     NULL, &tracing_readme_fops);
2788         if (!entry)
2789                 pr_warning("Could not create debugfs 'README' entry\n");
2790
2791         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2792                                     NULL, &tracing_pipe_fops);
2793         if (!entry)
2794                 pr_warning("Could not create debugfs "
2795                            "'trace_pipe' entry\n");
2796
2797         entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer,
2798                                     &global_trace, &tracing_entries_fops);
2799         if (!entry)
2800                 pr_warning("Could not create debugfs "
2801                            "'buffer_size_kb' entry\n");
2802
2803         entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2804                                     NULL, &tracing_mark_fops);
2805         if (!entry)
2806                 pr_warning("Could not create debugfs "
2807                            "'trace_marker' entry\n");
2808
2809 #ifdef CONFIG_DYNAMIC_FTRACE
2810         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2811                                     &ftrace_update_tot_cnt,
2812                                     &tracing_dyn_info_fops);
2813         if (!entry)
2814                 pr_warning("Could not create debugfs "
2815                            "'dyn_ftrace_total_info' entry\n");
2816 #endif
2817 #ifdef CONFIG_SYSPROF_TRACER
2818         init_tracer_sysprof_debugfs(d_tracer);
2819 #endif
2820         return 0;
2821 }
2822
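/*
 * trace_vprintk - write a printk-style message into the ring buffer
 * as a TRACE_PRINT entry.  The static trace_buf[] staging area is
 * shared, so formatting is serialized by trace_buf_lock; graph
 * tracing is paused so the formatting itself does not recurse into
 * the tracer.
 */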
2823 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
2824 {
2825         static DEFINE_SPINLOCK(trace_buf_lock);
2826         static char trace_buf[TRACE_BUF_SIZE];
2827
2828         struct ring_buffer_event *event;
2829         struct trace_array *tr = &global_trace;
2830         struct trace_array_cpu *data;
2831         int cpu, len = 0, size, pc;
2832         struct print_entry *entry;
2833         unsigned long irq_flags;
2834
2835         if (tracing_disabled || tracing_selftest_running)
2836                 return 0;
2837
2838         pc = preempt_count();
2839         preempt_disable_notrace();
2840         cpu = raw_smp_processor_id();
2841         data = tr->data[cpu];
2842
2843         if (unlikely(atomic_read(&data->disabled)))
2844                 goto out;
2845
2846         pause_graph_tracing();
2847         spin_lock_irqsave(&trace_buf_lock, irq_flags);
2848         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2849
2850         len = min(len, TRACE_BUF_SIZE-1);
2851         trace_buf[len] = 0;
2852
2853         size = sizeof(*entry) + len + 1;
2854         event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2855         if (!event)
2856                 goto out_unlock;
2857         entry = ring_buffer_event_data(event);
2858         tracing_generic_entry_update(&entry->ent, irq_flags, pc);
2859         entry->ent.type                 = TRACE_PRINT;
2860         entry->ip                       = ip;
2861         entry->depth                    = depth;
2862
2863         memcpy(&entry->buf, trace_buf, len);
2864         entry->buf[len] = 0;
2865         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
2866
2867  out_unlock:
2868         spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
2869         unpause_graph_tracing();
2870  out:
2871         preempt_enable_notrace();
2872
2873         return len;
2874 }
2875 EXPORT_SYMBOL_GPL(trace_vprintk);
2876
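/*
 * __ftrace_printk - record a formatted message in the trace buffer.
 * Normally reached through the ftrace_printk() wrapper, which supplies
 * the caller's instruction pointer, roughly:
 *
 *	ftrace_printk("read %d bytes from %s\n", len, name);
 *
 * Returns the number of bytes recorded, or 0 when the 'printk' trace
 * option is off.
 */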
2877 int __ftrace_printk(unsigned long ip, const char *fmt, ...)
2878 {
2879         int ret;
2880         va_list ap;
2881
2882         if (!(trace_flags & TRACE_ITER_PRINTK))
2883                 return 0;
2884
2885         va_start(ap, fmt);
2886         ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
2887         va_end(ap);
2888         return ret;
2889 }
2890 EXPORT_SYMBOL_GPL(__ftrace_printk);
2891
2892 int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
2893 {
2894         if (!(trace_flags & TRACE_ITER_PRINTK))
2895                 return 0;
2896
2897         return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
2898 }
2899 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
2900
2901 static int trace_panic_handler(struct notifier_block *this,
2902                                unsigned long event, void *unused)
2903 {
2904         if (ftrace_dump_on_oops)
2905                 ftrace_dump();
2906         return NOTIFY_OK;
2907 }
2908
2909 static struct notifier_block trace_panic_notifier = {
2910         .notifier_call  = trace_panic_handler,
2911         .next           = NULL,
2912         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
2913 };
2914
2915 static int trace_die_handler(struct notifier_block *self,
2916                              unsigned long val,
2917                              void *data)
2918 {
2919         switch (val) {
2920         case DIE_OOPS:
2921                 if (ftrace_dump_on_oops)
2922                         ftrace_dump();
2923                 break;
2924         default:
2925                 break;
2926         }
2927         return NOTIFY_OK;
2928 }
2929
2930 static struct notifier_block trace_die_notifier = {
2931         .notifier_call = trace_die_handler,
2932         .priority = 200
2933 };
2934
2935 /*
2936  * printk is limited to a maximum of 1024 bytes; we really don't
2937  * need it that big. Nothing should be printing 1000 characters anyway.
2938  */
2939 #define TRACE_MAX_PRINT         1000
2940
2941 /*
2942  * Define here KERN_TRACE so that we have one place to modify
2943  * it if we decide to change what log level the ftrace dump
2944  * should be at.
2945  */
2946 #define KERN_TRACE              KERN_EMERG
2947
2948 static void
2949 trace_printk_seq(struct trace_seq *s)
2950 {
2951         /* Probably should print a warning here. */
2952         if (s->len >= TRACE_MAX_PRINT)
2953                 s->len = TRACE_MAX_PRINT;
2954
2955         /* should be zero terminated, but we are paranoid. */
2956         s->buffer[s->len] = 0;
2957
2958         printk(KERN_TRACE "%s", s->buffer);
2959
2960         trace_seq_reset(s);
2961 }
2962
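/*
 * ftrace_dump - dump the ftrace ring buffer to the console.  Meant
 * for oops/panic paths: it turns tracing off for good (tracing_off()
 * plus ftrace_kill()), then walks every entry in the latency format
 * and prints it at KERN_TRACE level.  Only the first caller ever
 * dumps; later calls are ignored.
 */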
2963 void ftrace_dump(void)
2964 {
2965         static DEFINE_SPINLOCK(ftrace_dump_lock);
2966         /* use static because iter can be a bit big for the stack */
2967         static struct trace_iterator iter;
2968         static int dump_ran;
2969         unsigned long flags;
2970         int cnt = 0, cpu;
2971
2972         /* only one dump */
2973         spin_lock_irqsave(&ftrace_dump_lock, flags);
2974         if (dump_ran)
2975                 goto out;
2976
2977         dump_ran = 1;
2978
2979         /* No turning back! */
2980         tracing_off();
2981         ftrace_kill();
2982
2983         for_each_tracing_cpu(cpu) {
2984                 atomic_inc(&global_trace.data[cpu]->disabled);
2985         }
2986
2987         /* don't look at user memory in panic mode */
2988         trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
2989
2990         printk(KERN_TRACE "Dumping ftrace buffer:\n");
2991
2992         iter.tr = &global_trace;
2993         iter.trace = current_trace;
2994
2995         /*
2996          * We need to stop all tracing on all CPUs to read
2997          * the next buffer. This is a bit expensive, but is
2998          * not done often. We fill in all that we can read,
2999          * and then release the locks again.
3000          */
3001
3002         while (!trace_empty(&iter)) {
3003
3004                 if (!cnt)
3005                         printk(KERN_TRACE "---------------------------------\n");
3006
3007                 cnt++;
3008
3009                 /* reset all but tr, trace, and overruns */
3010                 memset(&iter.seq, 0,
3011                        sizeof(struct trace_iterator) -
3012                        offsetof(struct trace_iterator, seq));
3013                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3014                 iter.pos = -1;
3015
3016                 if (find_next_entry_inc(&iter) != NULL) {
3017                         print_trace_line(&iter);
3018                         trace_consume(&iter);
3019                 }
3020
3021                 trace_printk_seq(&iter.seq);
3022         }
3023
3024         if (!cnt)
3025                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
3026         else
3027                 printk(KERN_TRACE "---------------------------------\n");
3028
3029  out:
3030         spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3031 }
3032
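/*
 * tracer_alloc_buffers - early-boot setup: allocate the cpumasks and
 * the global (and, with CONFIG_TRACER_MAX_TRACE, the max-latency)
 * ring buffers, hook up the per-CPU data, register the nop tracer as
 * the default and install the panic/die notifiers that drive
 * ftrace_dump_on_oops.
 */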
3033 __init static int tracer_alloc_buffers(void)
3034 {
3036         int i;
3037         int ret = -ENOMEM;
3038
3039         if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
3040                 goto out;
3041
3042         if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3043                 goto out_free_buffer_mask;
3044
3045         cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3046         cpumask_copy(tracing_cpumask, cpu_all_mask);
3047
3048         /* TODO: make the number of buffers hot pluggable with CPUS */
3049         global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3050                                                    TRACE_BUFFER_FLAGS);
3051         if (!global_trace.buffer) {
3052                 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3053                 WARN_ON(1);
3054                 goto out_free_cpumask;
3055         }
3056         global_trace.entries = ring_buffer_size(global_trace.buffer);
3057
3058
3059 #ifdef CONFIG_TRACER_MAX_TRACE
3060         max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3061                                              TRACE_BUFFER_FLAGS);
3062         if (!max_tr.buffer) {
3063                 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3064                 WARN_ON(1);
3065                 ring_buffer_free(global_trace.buffer);
3066                 goto out_free_cpumask;
3067         }
3068         max_tr.entries = ring_buffer_size(max_tr.buffer);
3069         WARN_ON(max_tr.entries != global_trace.entries);
3070 #endif
3071
3072         /* Set up the per-CPU data pointers for all buffers */
3073         for_each_tracing_cpu(i) {
3074                 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3075                 max_tr.data[i] = &per_cpu(max_data, i);
3076         }
3077
3078         trace_init_cmdlines();
3079
3080         register_tracer(&nop_trace);
3081         current_trace = &nop_trace;
3082 #ifdef CONFIG_BOOT_TRACER
3083         register_tracer(&boot_tracer);
3084 #endif
3085         /* All seems OK, enable tracing */
3086         tracing_disabled = 0;
3087
3088         atomic_notifier_chain_register(&panic_notifier_list,
3089                                        &trace_panic_notifier);
3090
3091         register_die_notifier(&trace_die_notifier);
3092         return 0;
3093
3094 out_free_cpumask:
3095         free_cpumask_var(tracing_cpumask);
3096 out_free_buffer_mask:
3097         free_cpumask_var(tracing_buffer_mask);
3098 out:
3099         return ret;
3100 }
3101
3102 __init static int clear_boot_tracer(void)
3103 {
3104         /*
3105          * The default bootup tracer lives in an __init section.
3106          * This function is called from a late_initcall. If the
3107          * boot tracer was never registered, clear the pointer to
3108          * prevent a later registration from accessing memory that
3109          * is about to be freed.
3110          */
3111         if (!default_bootup_tracer)
3112                 return 0;
3113
3114         printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
3115                default_bootup_tracer);
3116         default_bootup_tracer = NULL;
3117
3118         return 0;
3119 }
3120
3121 early_initcall(tracer_alloc_buffers);
3122 fs_initcall(tracer_init_debugfs);
3123 late_initcall(clear_boot_tracer);