ftrace: binary and not logical for continue test
[linux-2.6] kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/writeback.h>

#include <linux/stacktrace.h>

#include "trace.h"

unsigned long __read_mostly     tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly     tracing_thresh;

static unsigned long __read_mostly      tracing_nr_buffers;
static cpumask_t __read_mostly          tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)       \
        for_each_cpu_mask(cpu, tracing_buffer_mask)

static int trace_alloc_page(void);
static int trace_free_page(void);

static int tracing_disabled = 1;

static unsigned long tracing_pages_allocated;

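/* Rounds to the nearest microsecond: e.g. ns2usecs(1500) yields (1500 + 500) / 1000 == 2. */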
long
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

cycle_t ftrace_now(int cpu)
{
        return cpu_clock(cpu);
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

/* tracer_enabled is used to toggle activation of a tracer */
static int                      tracer_enabled = 1;

/* function tracing enabled */
int                             ftrace_function_enabled;

/*
 * trace_nr_entries is the number of entries that is allocated
 * for a buffer. Note, the number of entries is always rounded
 * to a multiple of ENTRIES_PER_PAGE.
 *
 * This number is purposely set to the low value of 16384 so that,
 * if a dump on oops happens, we do not have to wait for a huge
 * amount of output. It is configurable at both boot time and
 * run time anyway.
 */
#define TRACE_ENTRIES_DEFAULT   16384UL

static unsigned long            trace_nr_entries = TRACE_ENTRIES_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer            *current_trace __read_mostly;

/*
 * max_tracer_type_len is used to simplify the allocating of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int                      max_tracer_type_len;

/*
 * trace_types_lock is used to protect the trace_types list.
 * This lock is also used to keep user access serialized.
 * Accesses from userspace will grab this lock while userspace
 * activities happen inside the kernel.
 */
static DEFINE_MUTEX(trace_types_lock);

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

static notrace void no_trace_init(struct trace_array *tr)
{
        int cpu;

        ftrace_function_enabled = 0;
        if (tr->ctrl)
                for_each_online_cpu(cpu)
                        tracing_reset(tr->data[cpu]);
        tracer_enabled = 0;
}

/* dummy tracer to disable tracing */
static struct tracer no_tracer __read_mostly = {
        .name           = "none",
        .init           = no_trace_init
};


/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
        /*
         * The runqueue_is_locked() can fail, but this is the best we
         * have for now:
         */
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
                wake_up(&trace_wait);
}

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
{
        unsigned long nr_entries;
        int ret;

        if (!str)
                return 0;
        ret = strict_strtoul(str, 0, &nr_entries);
        /* nr_entries cannot be zero */
        if (ret < 0 || nr_entries == 0)
                return 0;
        trace_nr_entries = nr_entries;
        return 1;
}
__setup("trace_entries=", set_nr_entries);
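/*
 * Example (value chosen for illustration): booting with
 * "trace_entries=65536" makes each CPU buffer hold 65536 entries,
 * rounded to whole pages of ENTRIES_PER_PAGE entries each.
 */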

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF     - interrupts were disabled
 *  NEED_RESCHED - reschedule is requested
 *  HARDIRQ      - inside an interrupt handler
 *  SOFTIRQ      - inside a softirq handler
 *  CONT         - multiple entries hold the trace item
 */
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_NEED_RESCHED         = 0x02,
        TRACE_FLAG_HARDIRQ              = 0x04,
        TRACE_FLAG_SOFTIRQ              = 0x08,
        TRACE_FLAG_CONT                 = 0x10,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "sched-tree",
        NULL
};
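/*
 * These names are what userspace reads and writes in the debugfs
 * iter_ctrl file to toggle the corresponding TRACE_ITER_* bits in
 * trace_flags (typically writing a "no"-prefixed name, e.g.
 * "nosym-addr", to clear one).
 */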

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as a raw_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 */
static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}

#define CHECK_COND(cond)                        \
        if (unlikely(cond)) {                   \
                tracing_disabled = 1;           \
                WARN_ON(1);                     \
                return -1;                      \
        }

/**
 * check_pages - integrity check of trace buffers
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
int check_pages(struct trace_array_cpu *data)
{
        struct page *page, *tmp;

        CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
        CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);

        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
                CHECK_COND(page->lru.next->prev != &page->lru);
                CHECK_COND(page->lru.prev->next != &page->lru);
        }

        return 0;
}

/**
 * head_page - page address of the first page in per_cpu buffer.
 *
 * head_page returns the page address of the first page in
 * a per_cpu buffer. This also performs various consistency
 * checks to make sure the buffer has not been corrupted.
 */
void *head_page(struct trace_array_cpu *data)
{
        struct page *page;

        if (list_empty(&data->trace_pages))
                return NULL;

        page = list_entry(data->trace_pages.next, struct page, lru);
        BUG_ON(&page->lru == &data->trace_pages);

        return page_address(page);
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}
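
/*
 * Typical in-file usage: build a line of output with trace_seq_printf()
 * and friends, then hand it to a seq_file with trace_print_seq() (which
 * also resets the sequence) or copy it out with trace_seq_to_user().
 */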

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

#define HEX_CHARS 17
static const char hex2asc[] = "0123456789abcdef";

static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        unsigned char *data = mem;
        unsigned char byte;
        int i, j;

        BUG_ON(len >= HEX_CHARS);

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                byte = data[i];

                hex[j++] = hex2asc[byte & 0x0f];
                hex[j++] = hex2asc[byte >> 4];
        }
        hex[j++] = ' ';

        return trace_seq_putmem(s, hex, j);
}

static void
trace_seq_reset(struct trace_seq *s)
{
        s->len = 0;
        s->readpos = 0;
}

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
        int len;
        int ret;

        if (s->len <= s->readpos)
                return -EBUSY;

        len = s->len - s->readpos;
        if (cnt > len)
                cnt = len;
        ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
        if (ret)
                return -EFAULT;

        /* advance only by what was actually copied out */
        s->readpos += cnt;
        return cnt;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_reset(s);
}

/*
 * Flip the trace buffers between two trace descriptors. This is
 * usually done between the global_trace and the max_tr to record
 * a snapshot of the current trace.
 *
 * The ftrace_max_lock must be held.
 */
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
        struct list_head flip_pages;

        INIT_LIST_HEAD(&flip_pages);

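        /*
         * Copy the ring bookkeeping (trace_head_idx through the end of
         * struct trace_array_cpu) from tr2 into tr1; the page lists
         * themselves are swapped by the list_splice calls below.
         */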
        memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
                sizeof(struct trace_array_cpu) -
                offsetof(struct trace_array_cpu, trace_head_idx));

        check_pages(tr1);
        check_pages(tr2);
        list_splice_init(&tr1->trace_pages, &flip_pages);
        list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
        list_splice_init(&flip_pages, &tr2->trace_pages);
        BUG_ON(!list_empty(&flip_pages));
        check_pages(tr1);
        check_pages(tr2);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data;
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
        for_each_tracing_cpu(i) {
                data = tr->data[i];
                flip_trace(max_tr.data[i], data);
                tracing_reset(data);
        }

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        for_each_tracing_cpu(i)
                tracing_reset(max_tr.data[i]);

        flip_trace(max_tr.data[cpu], data);
        tracing_reset(data);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array_cpu *data;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_tracing_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                current_trace = type;
                tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_tracing_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

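/*
 * A minimal tracer plugin (illustrative sketch only, the names below
 * are made up) registers itself much like the dummy no_tracer above:
 *
 *      static struct tracer example_tracer __read_mostly = {
 *              .name   = "example",
 *              .init   = example_trace_init,
 *      };
 *
 *      register_tracer(&example_tracer);
 */
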
void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array_cpu *data)
{
        data->trace_idx = 0;
        data->overrun = 0;
        data->trace_head = data->trace_tail = head_page(data);
        data->trace_head_idx = 0;
        data->trace_tail_idx = 0;
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);

/* temporarily disable recording */
atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

void trace_stop_cmdline_recording(void);

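/*
 * trace_save_cmdline remembers tsk->comm in a small circular table of
 * SAVED_CMDLINES slots. map_pid_to_cmdline maps a pid to its slot and
 * map_cmdline_to_pid maps the slot back to its owner, so that reusing
 * a slot can first invalidate the stale pid's mapping.
 */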
static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

static inline struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
        /*
         * Round-robin - but skip the head (which is not a real page):
         */
        next = next->next;
        if (unlikely(next == &data->trace_pages))
                next = next->next;
        BUG_ON(next == &data->trace_pages);

        return next;
}

static inline void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
        struct list_head *next;
        struct page *page;

        page = virt_to_page(addr);

        next = trace_next_list(data, &page->lru);
        page = list_entry(next, struct page, lru);

        return page_address(page);
}

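/*
 * Reserve the next entry slot in the per-CPU ring of pages and advance
 * the head. If the head catches up with the tail, the oldest entry is
 * overwritten and accounted for in data->overrun.
 */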
static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
        unsigned long idx, idx_next;
        struct trace_entry *entry;

        data->trace_idx++;
        idx = data->trace_head_idx;
        idx_next = idx + 1;

        BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

        entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

        if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
                data->trace_head = trace_next_page(data, data->trace_head);
                idx_next = 0;
        }

        if (data->trace_head == data->trace_tail &&
            idx_next == data->trace_tail_idx) {
                /* overrun */
                data->overrun++;
                data->trace_tail_idx++;
                if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                        data->trace_tail =
                                trace_next_page(data, data->trace_tail);
                        data->trace_tail_idx = 0;
                }
        }

        data->trace_head_idx = idx_next;

        return entry;
}

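/*
 * Fill in the fields common to all entry types: pid, timestamp,
 * preempt count and the irqs-off/hardirq/softirq/need-resched flags
 * that the latency output format prints as 'd', 'H'/'h'/'s' and 'N'.
 */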
static inline void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
        struct task_struct *tsk = current;
        unsigned long pc;

        pc = preempt_count();

        entry->field.preempt_count      = pc & 0xff;
        entry->field.pid                = (tsk) ? tsk->pid : 0;
        entry->field.t                  = ftrace_now(raw_smp_processor_id());
        entry->field.flags =
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                           = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type                     = TRACE_FN;
        entry->field.fn.ip              = ip;
        entry->field.fn.parent_ip       = parent_ip;
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, data, ip, parent_ip, flags);
}

#ifdef CONFIG_MMIOTRACE
void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
                                                struct mmiotrace_rw *rw)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);

        entry                           = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type                     = TRACE_MMIO_RW;
        entry->field.mmiorw             = *rw;

        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}

void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
                                                struct mmiotrace_map *map)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);

        entry                           = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type                     = TRACE_MMIO_MAP;
        entry->field.mmiomap            = *map;

        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}
#endif

void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip)
{
        struct trace_entry *entry;
        struct stack_trace trace;

        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_STACK;

        memset(&entry->field.stack, 0, sizeof(entry->field.stack));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = skip;
        trace.entries           = entry->field.stack.caller;

        save_stack_trace(&trace);
}

void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                           = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type                     = TRACE_SPECIAL;
        entry->field.special.arg1       = arg1;
        entry->field.special.arg2       = arg2;
        entry->field.special.arg3       = arg3;
        __trace_stack(tr, data, irq_flags, 4);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}

void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                           = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type                     = TRACE_CTX;
        entry->field.ctx.prev_pid       = prev->pid;
        entry->field.ctx.prev_prio      = prev->prio;
        entry->field.ctx.prev_state     = prev->state;
        entry->field.ctx.next_pid       = next->pid;
        entry->field.ctx.next_prio      = next->prio;
        entry->field.ctx.next_state     = next->state;
        __trace_stack(tr, data, flags, 5);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_WAKE;
        entry->field.ctx.prev_pid       = curr->pid;
        entry->field.ctx.prev_prio      = curr->prio;
        entry->field.ctx.prev_state     = curr->state;
        entry->field.ctx.next_pid       = wakee->pid;
        entry->field.ctx.next_prio      = wakee->prio;
        entry->field.ctx.next_state     = wakee->state;
        __trace_stack(tr, data, flags, 6);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}

void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

#ifdef CONFIG_FTRACE
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!ftrace_function_enabled))
                return;

        if (skip_trace(ip))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;
        register_ftrace_function(&trace_ops);
        if (tracer_enabled)
                ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
};

/* Return the current entry. */
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
                struct trace_iterator *iter, int cpu)
{
        struct page *page;
        struct trace_entry *array;

        if (iter->next_idx[cpu] >= tr->entries ||
            iter->next_idx[cpu] >= data->trace_idx ||
            (data->trace_head == data->trace_tail &&
             data->trace_head_idx == data->trace_tail_idx))
                return NULL;

        if (!iter->next_page[cpu]) {
                /* Initialize the iterator for this cpu trace buffer */
                WARN_ON(!data->trace_tail);
                page = virt_to_page(data->trace_tail);
                iter->next_page[cpu] = &page->lru;
                iter->next_page_idx[cpu] = data->trace_tail_idx;
        }

        page = list_entry(iter->next_page[cpu], struct page, lru);
        BUG_ON(&data->trace_pages == &page->lru);

        array = page_address(page);

        WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
        return &array[iter->next_page_idx[cpu]];
}

/* Increment the index counter of an iterator by one */
static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{
        iter->idx++;
        iter->next_idx[cpu]++;
        iter->next_page_idx[cpu]++;

        if (iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE) {
                struct trace_array_cpu *data = iter->tr->data[cpu];

                iter->next_page_idx[cpu] = 0;
                iter->next_page[cpu] =
                        trace_next_list(data, iter->next_page[cpu]);
        }
}

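/*
 * Return the next entry that is not a TRACE_CONT continuation, without
 * permanently advancing the iterator: the position is saved first,
 * the continuation entries are skipped, and the position is restored
 * before returning.
 */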
static struct trace_entry *
trace_entry_next(struct trace_array *tr, struct trace_array_cpu *data,
                 struct trace_iterator *iter, int cpu)
{
        struct list_head *next_page;
        struct trace_entry *ent;
        int idx, next_idx, next_page_idx;

        ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);

        if (likely(!ent || ent->type != TRACE_CONT))
                return ent;

        /* save the iterator details */
        idx             = iter->idx;
        next_idx        = iter->next_idx[cpu];
        next_page_idx   = iter->next_page_idx[cpu];
        next_page       = iter->next_page[cpu];

        /* find a real entry */
        do {
                trace_iterator_increment(iter, cpu);
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
        } while (ent && ent->type == TRACE_CONT);

        /* reset the iterator */
        iter->idx                       = idx;
        iter->next_idx[cpu]             = next_idx;
        iter->next_page_idx[cpu]        = next_page_idx;
        iter->next_page[cpu]            = next_page;

        return ent;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, int inc)
{
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
        int next_cpu = -1;
        int cpu;

        for_each_tracing_cpu(cpu) {
                if (!head_page(tr->data[cpu]))
                        continue;

                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);

                if (ent && ent->type == TRACE_CONT) {
                        struct trace_array_cpu *data = tr->data[cpu];

                        if (!inc)
                                ent = trace_entry_next(tr, data, iter, cpu);
                        else {
                                while (ent && ent->type == TRACE_CONT) {
                                        trace_iterator_increment(iter, cpu);
                                        ent = trace_entry_idx(tr, tr->data[cpu],
                                                              iter, cpu);
                                }
                        }
                }

                /*
                 * Pick the entry with the smallest timestamp:
                 */
                if (ent && (!next || ent->field.t < next->field.t)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        return next;
}

/* Find the next real entry, without updating the iterator itself */
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
        return __find_next_entry(iter, ent_cpu, 0);
}

/* Find the next real entry, and increment the iterator to the next entry */
static void *find_next_entry_inc(struct trace_iterator *iter)
{
        struct trace_entry *next;
        int next_cpu = -1;

        next = __find_next_entry(iter, &next_cpu, 1);

        iter->prev_ent = iter->ent;
        iter->prev_cpu = iter->cpu;

        iter->ent = next;
        iter->cpu = next_cpu;

        if (next)
                trace_iterator_increment(iter, iter->cpu);

        return next ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
        struct trace_array_cpu *data = iter->tr->data[iter->cpu];
        struct trace_entry *ent;

 again:
        data->trace_tail_idx++;
        if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                data->trace_tail = trace_next_page(data, data->trace_tail);
                data->trace_tail_idx = 0;
        }

        /* Check if we emptied it; if so, reset the index */
        if (data->trace_head == data->trace_tail &&
            data->trace_head_idx == data->trace_tail_idx)
                data->trace_idx = 0;

        ent = trace_entry_idx(iter->tr, iter->tr->data[iter->cpu],
                              iter, iter->cpu);
        if (ent && ent->type == TRACE_CONT)
                goto again;
}

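/*
 * s_start, s_next and s_stop below implement the seq_file iterator
 * protocol for the trace output files; m->private carries the
 * trace_iterator.
 */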
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int i;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace) {
                mutex_unlock(&trace_types_lock);
                return NULL;
        }

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;
                iter->prev_ent = NULL;
                iter->prev_cpu = -1;

                for_each_tracing_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;
                }

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}

#define KRETPROBE_MSG "[unknown/kretprobe'd]"

#ifdef CONFIG_KRETPROBES
static inline int kretprobed(unsigned long addr)
{
        return addr == (unsigned long)kretprobe_trampoline;
}
#else
static inline int kretprobed(unsigned long addr)
{
        return 0;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                  _------=> CPU#            \n");
        seq_puts(m, "#                 / _-----=> irqs-off        \n");
        seq_puts(m, "#                | / _----=> need-resched    \n");
        seq_puts(m, "#                || / _---=> hardirq/softirq \n");
        seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#                |||| /                      \n");
        seq_puts(m, "#                |||||     delay             \n");
        seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /      |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID    CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |       |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total   = 0;
        unsigned long entries = 0;
        int cpu;
        const char *name = "preemption";

        if (type)
                name = type->name;

        for_each_tracing_cpu(cpu) {
                if (head_page(tr->data[cpu])) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
                                entries += tr->entries;
                        else
                                entries += tr->data[cpu]->trace_idx;
                }
        }

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        struct trace_field *field = &entry->field;
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(field->pid);

        trace_seq_printf(s, "%8.8s-%-5d ", comm, field->pid);
        trace_seq_printf(s, "%3d", cpu);
        trace_seq_printf(s, "%c%c",
                        (field->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                        ((field->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = field->flags & TRACE_FLAG_HARDIRQ;
        softirq = field->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq) {
                trace_seq_putc(s, 'H');
        } else {
                if (hardirq) {
                        trace_seq_putc(s, 'h');
                } else {
                        if (softirq)
                                trace_seq_putc(s, 's');
                        else
                                trace_seq_putc(s, '.');
                }
        }

        if (field->preempt_count)
                trace_seq_printf(s, "%x", field->preempt_count);
        else
                trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

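/*
 * In the latency format, a '!' after the timestamp marks a delay to
 * the next event larger than preempt_mark_thresh (100us), and a '+'
 * marks one larger than 1us.
 */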
static void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
{
        trace_seq_printf(s, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                trace_seq_puts(s, "!: ");
        else if (rel_usecs > 1)
                trace_seq_puts(s, "+: ");
        else
                trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void
trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
{
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[iter->cpu];
        struct trace_entry *ent;

        ent = trace_entry_idx(tr, data, iter, iter->cpu);
        if (!ent || ent->type != TRACE_CONT) {
                trace_seq_putc(s, '\n');
                return;
        }

        do {
                trace_seq_printf(s, "%s", ent->cont.buf);
                trace_iterator_increment(iter, iter->cpu);
                ent = trace_entry_idx(tr, data, iter, iter->cpu);
        } while (ent && ent->type == TRACE_CONT);
}

static int
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        struct trace_field *field = &entry->field;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
        int S, T;
        int i;
        unsigned state;

        if (!next_entry)
                next_entry = entry;

        if (entry->type == TRACE_CONT)
                return 1;

        rel_usecs = ns2usecs(next_entry->field.t - entry->field.t);
        abs_usecs = ns2usecs(entry->field.t - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(field->pid);
                trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
                                 " %ld.%03ldms (+%ld.%03ldms): ",
                                 comm,
                                 field->pid, cpu, field->flags,
                                 field->preempt_count, trace_idx,
                                 ns2usecs(field->t),
                                 abs_usecs/1000,
                                 abs_usecs % 1000, rel_usecs/1000,
                                 rel_usecs % 1000);
        } else {
                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(s, field->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
                if (kretprobed(field->fn.parent_ip))
                        trace_seq_puts(s, KRETPROBE_MSG);
                else
                        seq_print_ip_sym(s, field->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
                T = field->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[field->ctx.next_state] : 'X';

                state = field->ctx.prev_state ?
                        __ffs(field->ctx.prev_state) + 1 : 0;
                S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
                comm = trace_find_cmdline(field->ctx.next_pid);
                trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
                                 field->ctx.prev_pid,
                                 field->ctx.prev_prio,
                                 S, entry->type == TRACE_CTX ? "==>" : "  +",
                                 field->ctx.next_pid,
                                 field->ctx.next_prio,
                                 T, comm);
                break;
        case TRACE_SPECIAL:
                trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->special.arg1,
                                 field->special.arg2,
                                 field->special.arg3);
                break;
        case TRACE_STACK:
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i)
                                trace_seq_puts(s, " <= ");
                        seq_print_ip_sym(s, field->stack.caller[i], sym_flags);
                }
                trace_seq_puts(s, "\n");
                break;
        case TRACE_PRINT:
                seq_print_ip_sym(s, field->print.ip, sym_flags);
                trace_seq_printf(s, ": %s", field->print.buf);
                if (field->flags & TRACE_FLAG_CONT)
                        trace_seq_print_cont(s, iter);
                break;
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
        return 1;
}
1666
1667 static int print_trace_fmt(struct trace_iterator *iter)
1668 {
1669         struct trace_seq *s = &iter->seq;
1670         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1671         struct trace_entry *entry;
1672         struct trace_field *field;
1673         unsigned long usec_rem;
1674         unsigned long long t;
1675         unsigned long secs;
1676         char *comm;
1677         int ret;
1678         int S, T;
1679         int i;
1680
1681         entry = iter->ent;
1682
1683         if (entry->type == TRACE_CONT)
1684                 return 1;
1685
1686         field = &entry->field;
1687
1688         comm = trace_find_cmdline(iter->ent->field.pid);
1689
1690         t = ns2usecs(field->t);
1691         usec_rem = do_div(t, 1000000ULL);
1692         secs = (unsigned long)t;
1693
1694         ret = trace_seq_printf(s, "%16s-%-5d ", comm, field->pid);
1695         if (!ret)
1696                 return 0;
1697         ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1698         if (!ret)
1699                 return 0;
1700         ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1701         if (!ret)
1702                 return 0;
1703
1704         switch (entry->type) {
1705         case TRACE_FN:
1706                 ret = seq_print_ip_sym(s, field->fn.ip, sym_flags);
1707                 if (!ret)
1708                         return 0;
1709                 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1710                                                 field->fn.parent_ip) {
1711                         ret = trace_seq_printf(s, " <-");
1712                         if (!ret)
1713                                 return 0;
1714                         if (kretprobed(field->fn.parent_ip))
1715                                 ret = trace_seq_puts(s, KRETPROBE_MSG);
1716                         else
1717                                 ret = seq_print_ip_sym(s,
1718                                                        field->fn.parent_ip,
1719                                                        sym_flags);
1720                         if (!ret)
1721                                 return 0;
1722                 }
1723                 ret = trace_seq_printf(s, "\n");
1724                 if (!ret)
1725                         return 0;
1726                 break;
1727         case TRACE_CTX:
1728         case TRACE_WAKE:
1729                 S = field->ctx.prev_state < sizeof(state_to_char) ?
1730                         state_to_char[field->ctx.prev_state] : 'X';
1731                 T = field->ctx.next_state < sizeof(state_to_char) ?
1732                         state_to_char[field->ctx.next_state] : 'X';
1733                 ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
1734                                        field->ctx.prev_pid,
1735                                        field->ctx.prev_prio,
1736                                        S,
1737                                        entry->type == TRACE_CTX ? "==>" : "  +",
1738                                        field->ctx.next_pid,
1739                                        field->ctx.next_prio,
1740                                        T);
1741                 if (!ret)
1742                         return 0;
1743                 break;
1744         case TRACE_SPECIAL:
1745                 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1746                                  field->special.arg1,
1747                                  field->special.arg2,
1748                                  field->special.arg3);
1749                 if (!ret)
1750                         return 0;
1751                 break;
1752         case TRACE_STACK:
1753                 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1754                         if (i) {
1755                                 ret = trace_seq_puts(s, " <= ");
1756                                 if (!ret)
1757                                         return 0;
1758                         }
1759                         ret = seq_print_ip_sym(s, field->stack.caller[i],
1760                                                sym_flags);
1761                         if (!ret)
1762                                 return 0;
1763                 }
1764                 ret = trace_seq_puts(s, "\n");
1765                 if (!ret)
1766                         return 0;
1767                 break;
1768         case TRACE_PRINT:
1769                 seq_print_ip_sym(s, field->print.ip, sym_flags);
1770                 trace_seq_printf(s, ": %s", field->print.buf);
1771                 if (field->flags & TRACE_FLAG_CONT)
1772                         trace_seq_print_cont(s, iter);
1773                 break;
1774         }
1775         return 1;
1776 }
1777
1778 static int print_raw_fmt(struct trace_iterator *iter)
1779 {
1780         struct trace_seq *s = &iter->seq;
1781         struct trace_entry *entry;
1782         struct trace_field *field;
1783         int ret;
1784         int S, T;
1785
1786         entry = iter->ent;
1787
1788         if (entry->type == TRACE_CONT)
1789                 return 1;
1790
1791         field = &entry->field;
1792
1793         ret = trace_seq_printf(s, "%d %d %llu ",
1794                 field->pid, iter->cpu, field->t);
1795         if (!ret)
1796                 return 0;
1797
1798         switch (entry->type) {
1799         case TRACE_FN:
1800                 ret = trace_seq_printf(s, "%lx %lx\n",
1801                                         field->fn.ip,
1802                                         field->fn.parent_ip);
1803                 if (!ret)
1804                         return 0;
1805                 break;
1806         case TRACE_CTX:
1807         case TRACE_WAKE:
1808                 S = field->ctx.prev_state < sizeof(state_to_char) ?
1809                         state_to_char[field->ctx.prev_state] : 'X';
1810                 T = field->ctx.next_state < sizeof(state_to_char) ?
1811                         state_to_char[field->ctx.next_state] : 'X';
1812                 if (entry->type == TRACE_WAKE)
1813                         S = '+';
1814                 ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
1815                                        field->ctx.prev_pid,
1816                                        field->ctx.prev_prio,
1817                                        S,
1818                                        field->ctx.next_pid,
1819                                        field->ctx.next_prio,
1820                                        T);
1821                 if (!ret)
1822                         return 0;
1823                 break;
1824         case TRACE_SPECIAL:
1825         case TRACE_STACK:
1826                 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1827                                  field->special.arg1,
1828                                  field->special.arg2,
1829                                  field->special.arg3);
1830                 if (!ret)
1831                         return 0;
1832                 break;
1833         case TRACE_PRINT:
1834                 trace_seq_printf(s, "# %lx %s",
1835                                  field->print.ip, field->print.buf);
1836                 if (field->flags & TRACE_FLAG_CONT)
1837                         trace_seq_print_cont(s, iter);
1838                 break;
1839         }
1840         return 1;
1841 }
1842
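/*
 * Helper macros for the binary and hex output formats below: emit one
 * field into the trace_seq and make the calling printer return 0
 * (seq buffer full) if the write did not fit.
 */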
1843 #define SEQ_PUT_FIELD_RET(s, x)                         \
1844 do {                                                    \
1845         if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
1846                 return 0;                               \
1847 } while (0)
1848
1849 #define SEQ_PUT_HEX_FIELD_RET(s, x)                     \
1850 do {                                                    \
1851         if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
1852                 return 0;                               \
1853 } while (0)
1854
1855 static int print_hex_fmt(struct trace_iterator *iter)
1856 {
1857         struct trace_seq *s = &iter->seq;
1858         unsigned char newline = '\n';
1859         struct trace_entry *entry;
1860         struct trace_field *field;
1861         int S, T;
1862
1863         entry = iter->ent;
1864
1865         if (entry->type == TRACE_CONT)
1866                 return 1;
1867
1868         field = &entry->field;
1869
1870         SEQ_PUT_HEX_FIELD_RET(s, field->pid);
1871         SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1872         SEQ_PUT_HEX_FIELD_RET(s, field->t);
1873
1874         switch (entry->type) {
1875         case TRACE_FN:
1876                 SEQ_PUT_HEX_FIELD_RET(s, field->fn.ip);
1877                 SEQ_PUT_HEX_FIELD_RET(s, field->fn.parent_ip);
1878                 break;
1879         case TRACE_CTX:
1880         case TRACE_WAKE:
1881                 S = field->ctx.prev_state < sizeof(state_to_char) ?
1882                         state_to_char[field->ctx.prev_state] : 'X';
1883                 T = field->ctx.next_state < sizeof(state_to_char) ?
1884                         state_to_char[field->ctx.next_state] : 'X';
1885                 if (entry->type == TRACE_WAKE)
1886                         S = '+';
1887                 SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_pid);
1888                 SEQ_PUT_HEX_FIELD_RET(s, field->ctx.prev_prio);
1889                 SEQ_PUT_HEX_FIELD_RET(s, S);
1890                 SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_pid);
1891                 SEQ_PUT_HEX_FIELD_RET(s, field->ctx.next_prio);
1892                 SEQ_PUT_HEX_FIELD_RET(s, T);
1893                 break;
1894         case TRACE_SPECIAL:
1895         case TRACE_STACK:
1896                 SEQ_PUT_HEX_FIELD_RET(s, field->special.arg1);
1897                 SEQ_PUT_HEX_FIELD_RET(s, field->special.arg2);
1898                 SEQ_PUT_HEX_FIELD_RET(s, field->special.arg3);
1899                 break;
1900         }
1901         SEQ_PUT_FIELD_RET(s, newline);
1902
1903         return 1;
1904 }
1905
1906 static int print_bin_fmt(struct trace_iterator *iter)
1907 {
1908         struct trace_seq *s = &iter->seq;
1909         struct trace_entry *entry;
1910         struct trace_field *field;
1911
1912         entry = iter->ent;
1913
1914         if (entry->type == TRACE_CONT)
1915                 return 1;
1916
1917         field = &entry->field;
1918
1919         SEQ_PUT_FIELD_RET(s, field->pid);
1920         SEQ_PUT_FIELD_RET(s, field->cpu);
1921         SEQ_PUT_FIELD_RET(s, field->t);
1922
1923         switch (entry->type) {
1924         case TRACE_FN:
1925                 SEQ_PUT_FIELD_RET(s, field->fn.ip);
1926                 SEQ_PUT_FIELD_RET(s, field->fn.parent_ip);
1927                 break;
1928         case TRACE_CTX:
1929                 SEQ_PUT_FIELD_RET(s, field->ctx.prev_pid);
1930                 SEQ_PUT_FIELD_RET(s, field->ctx.prev_prio);
1931                 SEQ_PUT_FIELD_RET(s, field->ctx.prev_state);
1932                 SEQ_PUT_FIELD_RET(s, field->ctx.next_pid);
1933                 SEQ_PUT_FIELD_RET(s, field->ctx.next_prio);
1934                 SEQ_PUT_FIELD_RET(s, field->ctx.next_state);
1935                 break;
1936         case TRACE_SPECIAL:
1937         case TRACE_STACK:
1938                 SEQ_PUT_FIELD_RET(s, field->special.arg1);
1939                 SEQ_PUT_FIELD_RET(s, field->special.arg2);
1940                 SEQ_PUT_FIELD_RET(s, field->special.arg3);
1941                 break;
1942         }
1943         return 1;
1944 }
1945
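/* return 1 iff no CPU buffer has unconsumed entries */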
1946 static int trace_empty(struct trace_iterator *iter)
1947 {
1948         struct trace_array_cpu *data;
1949         int cpu;
1950
1951         for_each_tracing_cpu(cpu) {
1952                 data = iter->tr->data[cpu];
1953
1954                 if (head_page(data) && data->trace_idx &&
1955                     (data->trace_tail != data->trace_head ||
1956                      data->trace_tail_idx != data->trace_head_idx))
1957                         return 0;
1958         }
1959         return 1;
1960 }
1961
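/*
 * Pick the output format for a single entry. A tracer's own
 * print_line() hook takes precedence, then the bin/hex/raw
 * iter_ctrl options in that order, then the latency format,
 * and finally the default human-readable format.
 */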
1962 static int print_trace_line(struct trace_iterator *iter)
1963 {
1964         if (iter->trace && iter->trace->print_line)
1965                 return iter->trace->print_line(iter);
1966
1967         if (trace_flags & TRACE_ITER_BIN)
1968                 return print_bin_fmt(iter);
1969
1970         if (trace_flags & TRACE_ITER_HEX)
1971                 return print_hex_fmt(iter);
1972
1973         if (trace_flags & TRACE_ITER_RAW)
1974                 return print_raw_fmt(iter);
1975
1976         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
1977                 return print_lat_fmt(iter, iter->idx, iter->cpu);
1978
1979         return print_trace_fmt(iter);
1980 }
1981
1982 static int s_show(struct seq_file *m, void *v)
1983 {
1984         struct trace_iterator *iter = v;
1985
1986         if (iter->ent == NULL) {
1987                 if (iter->tr) {
1988                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
1989                         seq_puts(m, "#\n");
1990                 }
1991                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
1992                         /* print nothing if the buffers are empty */
1993                         if (trace_empty(iter))
1994                                 return 0;
1995                         print_trace_header(m, iter);
1996                         if (!(trace_flags & TRACE_ITER_VERBOSE))
1997                                 print_lat_help_header(m);
1998                 } else {
1999                         if (!(trace_flags & TRACE_ITER_VERBOSE))
2000                                 print_func_help_header(m);
2001                 }
2002         } else {
2003                 print_trace_line(iter);
2004                 trace_print_seq(m, &iter->seq);
2005         }
2006
2007         return 0;
2008 }
2009
2010 static struct seq_operations tracer_seq_ops = {
2011         .start          = s_start,
2012         .next           = s_next,
2013         .stop           = s_stop,
2014         .show           = s_show,
2015 };
2016
2017 static struct trace_iterator *
2018 __tracing_open(struct inode *inode, struct file *file, int *ret)
2019 {
2020         struct trace_iterator *iter;
2021
2022         if (tracing_disabled) {
2023                 *ret = -ENODEV;
2024                 return NULL;
2025         }
2026
2027         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2028         if (!iter) {
2029                 *ret = -ENOMEM;
2030                 goto out;
2031         }
2032
2033         mutex_lock(&trace_types_lock);
2034         if (current_trace && current_trace->print_max)
2035                 iter->tr = &max_tr;
2036         else
2037                 iter->tr = inode->i_private;
2038         iter->trace = current_trace;
2039         iter->pos = -1;
2040
2041         /* TODO stop tracer */
2042         *ret = seq_open(file, &tracer_seq_ops);
2043         if (!*ret) {
2044                 struct seq_file *m = file->private_data;
2045                 m->private = iter;
2046
2047                 /* stop the trace while dumping */
2048                 if (iter->tr->ctrl) {
2049                         tracer_enabled = 0;
2050                         ftrace_function_enabled = 0;
2051                 }
2052
2053                 if (iter->trace && iter->trace->open)
2054                         iter->trace->open(iter);
2055         } else {
2056                 kfree(iter);
2057                 iter = NULL;
2058         }
2059         mutex_unlock(&trace_types_lock);
2060
2061  out:
2062         return iter;
2063 }
2064
2065 int tracing_open_generic(struct inode *inode, struct file *filp)
2066 {
2067         if (tracing_disabled)
2068                 return -ENODEV;
2069
2070         filp->private_data = inode->i_private;
2071         return 0;
2072 }
2073
2074 int tracing_release(struct inode *inode, struct file *file)
2075 {
2076         struct seq_file *m = (struct seq_file *)file->private_data;
2077         struct trace_iterator *iter = m->private;
2078
2079         mutex_lock(&trace_types_lock);
2080         if (iter->trace && iter->trace->close)
2081                 iter->trace->close(iter);
2082
2083         /* re-enable tracing if it was previously enabled */
2084         if (iter->tr->ctrl) {
2085                 tracer_enabled = 1;
2086                 /*
2087                  * It is safe to enable function tracing even if it
2088                  * isn't used
2089                  */
2090                 ftrace_function_enabled = 1;
2091         }
2092         mutex_unlock(&trace_types_lock);
2093
2094         seq_release(inode, file);
2095         kfree(iter);
2096         return 0;
2097 }
2098
2099 static int tracing_open(struct inode *inode, struct file *file)
2100 {
2101         int ret;
2102
2103         __tracing_open(inode, file, &ret);
2104
2105         return ret;
2106 }
2107
2108 static int tracing_lt_open(struct inode *inode, struct file *file)
2109 {
2110         struct trace_iterator *iter;
2111         int ret;
2112
2113         iter = __tracing_open(inode, file, &ret);
2114
2115         if (!ret)
2116                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2117
2118         return ret;
2119 }
2120
2121
2122 static void *
2123 t_next(struct seq_file *m, void *v, loff_t *pos)
2124 {
2125         struct tracer *t = m->private;
2126
2127         (*pos)++;
2128
2129         if (t)
2130                 t = t->next;
2131
2132         m->private = t;
2133
2134         return t;
2135 }
2136
2137 static void *t_start(struct seq_file *m, loff_t *pos)
2138 {
2139         struct tracer *t = m->private;
2140         loff_t l = 0;
2141
2142         mutex_lock(&trace_types_lock);
2143         for (; t && l < *pos; t = t_next(m, t, &l))
2144                 ;
2145
2146         return t;
2147 }
2148
2149 static void t_stop(struct seq_file *m, void *p)
2150 {
2151         mutex_unlock(&trace_types_lock);
2152 }
2153
2154 static int t_show(struct seq_file *m, void *v)
2155 {
2156         struct tracer *t = v;
2157
2158         if (!t)
2159                 return 0;
2160
2161         seq_printf(m, "%s", t->name);
2162         if (t->next)
2163                 seq_putc(m, ' ');
2164         else
2165                 seq_putc(m, '\n');
2166
2167         return 0;
2168 }
2169
2170 static struct seq_operations show_traces_seq_ops = {
2171         .start          = t_start,
2172         .next           = t_next,
2173         .stop           = t_stop,
2174         .show           = t_show,
2175 };
2176
2177 static int show_traces_open(struct inode *inode, struct file *file)
2178 {
2179         int ret;
2180
2181         if (tracing_disabled)
2182                 return -ENODEV;
2183
2184         ret = seq_open(file, &show_traces_seq_ops);
2185         if (!ret) {
2186                 struct seq_file *m = file->private_data;
2187                 m->private = trace_types;
2188         }
2189
2190         return ret;
2191 }
2192
2193 static struct file_operations tracing_fops = {
2194         .open           = tracing_open,
2195         .read           = seq_read,
2196         .llseek         = seq_lseek,
2197         .release        = tracing_release,
2198 };
2199
2200 static struct file_operations tracing_lt_fops = {
2201         .open           = tracing_lt_open,
2202         .read           = seq_read,
2203         .llseek         = seq_lseek,
2204         .release        = tracing_release,
2205 };
2206
2207 static struct file_operations show_traces_fops = {
2208         .open           = show_traces_open,
2209         .read           = seq_read,
2210         .release        = seq_release,
2211 };
2212
2213 /*
2214  * Only trace on a CPU if the bitmask is set:
2215  */
2216 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
2217
2218 /*
2219  * When tracing/tracing_cpumask is modified then this holds
2220  * the new bitmask we are about to install:
2221  */
2222 static cpumask_t tracing_cpumask_new;
2223
2224 /*
2225  * The tracer itself will not take this lock, but still we want
2226  * to provide a consistent cpumask to user-space:
2227  */
2228 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2229
2230 /*
2231  * Temporary storage for the character representation of the
2232  * CPU bitmask (and one more byte for the newline):
2233  */
2234 static char mask_str[NR_CPUS + 1];
2235
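/*
 * Example usage from user-space (a sketch; assumes debugfs is mounted
 * at /debug as in the mini-HOWTO below). The mask is parsed by
 * cpumask_parse_user(), so it is written as a hex bitmask:
 *
 *   # echo 3 > /debug/tracing/tracing_cpumask	  (trace CPUs 0 and 1 only)
 *   # cat /debug/tracing/tracing_cpumask
 */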
2236 static ssize_t
2237 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2238                      size_t count, loff_t *ppos)
2239 {
2240         int len;
2241
2242         mutex_lock(&tracing_cpumask_update_lock);
2243
2244         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2245         if (count - len < 2) {
2246                 count = -EINVAL;
2247                 goto out_err;
2248         }
2249         len += sprintf(mask_str + len, "\n");
2250         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2251
2252 out_err:
2253         mutex_unlock(&tracing_cpumask_update_lock);
2254
2255         return count;
2256 }
2257
2258 static ssize_t
2259 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2260                       size_t count, loff_t *ppos)
2261 {
2262         int err, cpu;
2263
2264         mutex_lock(&tracing_cpumask_update_lock);
2265         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2266         if (err)
2267                 goto err_unlock;
2268
2269         raw_local_irq_disable();
2270         __raw_spin_lock(&ftrace_max_lock);
2271         for_each_tracing_cpu(cpu) {
2272                 /*
2273                  * Increase/decrease the disabled counter if we are
2274                  * about to flip a bit in the cpumask:
2275                  */
2276                 if (cpu_isset(cpu, tracing_cpumask) &&
2277                                 !cpu_isset(cpu, tracing_cpumask_new)) {
2278                         atomic_inc(&global_trace.data[cpu]->disabled);
2279                 }
2280                 if (!cpu_isset(cpu, tracing_cpumask) &&
2281                                 cpu_isset(cpu, tracing_cpumask_new)) {
2282                         atomic_dec(&global_trace.data[cpu]->disabled);
2283                 }
2284         }
2285         __raw_spin_unlock(&ftrace_max_lock);
2286         raw_local_irq_enable();
2287
2288         tracing_cpumask = tracing_cpumask_new;
2289
2290         mutex_unlock(&tracing_cpumask_update_lock);
2291
2292         return count;
2293
2294 err_unlock:
2295         mutex_unlock(&tracing_cpumask_update_lock);
2296
2297         return err;
2298 }
2299
2300 static struct file_operations tracing_cpumask_fops = {
2301         .open           = tracing_open_generic,
2302         .read           = tracing_cpumask_read,
2303         .write          = tracing_cpumask_write,
2304 };
2305
2306 static ssize_t
2307 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
2308                        size_t cnt, loff_t *ppos)
2309 {
2310         char *buf;
2311         int r = 0;
2312         int len = 0;
2313         int i;
2314
2315         /* calculate max size */
2316         for (i = 0; trace_options[i]; i++) {
2317                 len += strlen(trace_options[i]);
2318                 len += 3; /* "no" and space */
2319         }
2320
2321         /* +2 for \n and \0 */
2322         buf = kmalloc(len + 2, GFP_KERNEL);
2323         if (!buf)
2324                 return -ENOMEM;
2325
2326         for (i = 0; trace_options[i]; i++) {
2327                 if (trace_flags & (1 << i))
2328                         r += sprintf(buf + r, "%s ", trace_options[i]);
2329                 else
2330                         r += sprintf(buf + r, "no%s ", trace_options[i]);
2331         }
2332
2333         r += sprintf(buf + r, "\n");
2334         WARN_ON(r >= len + 2);
2335
2336         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2337
2338         kfree(buf);
2339
2340         return r;
2341 }
2342
2343 static ssize_t
2344 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
2345                         size_t cnt, loff_t *ppos)
2346 {
2347         char buf[64];
2348         char *cmp = buf;
2349         int neg = 0;
2350         int i;
2351
2352         if (cnt >= sizeof(buf))
2353                 return -EINVAL;
2354
2355         if (copy_from_user(&buf, ubuf, cnt))
2356                 return -EFAULT;
2357
2358         buf[cnt] = 0;
2359
2360         if (strncmp(buf, "no", 2) == 0) {
2361                 neg = 1;
2362                 cmp += 2;
2363         }
2364
2365         for (i = 0; trace_options[i]; i++) {
2366                 int len = strlen(trace_options[i]);
2367
2368                 if (strncmp(cmp, trace_options[i], len) == 0) {
2369                         if (neg)
2370                                 trace_flags &= ~(1 << i);
2371                         else
2372                                 trace_flags |= (1 << i);
2373                         break;
2374                 }
2375         }
2376         /*
2377          * If no option could be set, return an error:
2378          */
2379         if (!trace_options[i])
2380                 return -EINVAL;
2381
2382         filp->f_pos += cnt;
2383
2384         return cnt;
2385 }
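
/*
 * Example (a sketch): each option name toggles one bit of trace_flags;
 * a "no" prefix clears the bit instead of setting it:
 *
 *   # echo sym-offset > /debug/tracing/iter_ctrl
 *   # echo nosym-offset > /debug/tracing/iter_ctrl
 */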
2386
2387 static struct file_operations tracing_iter_fops = {
2388         .open           = tracing_open_generic,
2389         .read           = tracing_iter_ctrl_read,
2390         .write          = tracing_iter_ctrl_write,
2391 };
2392
2393 static const char readme_msg[] =
2394         "tracing mini-HOWTO:\n\n"
2395         "# mkdir /debug\n"
2396         "# mount -t debugfs nodev /debug\n\n"
2397         "# cat /debug/tracing/available_tracers\n"
2398         "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
2399         "# cat /debug/tracing/current_tracer\n"
2400         "none\n"
2401         "# echo sched_switch > /debug/tracing/current_tracer\n"
2402         "# cat /debug/tracing/current_tracer\n"
2403         "sched_switch\n"
2404         "# cat /debug/tracing/iter_ctrl\n"
2405         "noprint-parent nosym-offset nosym-addr noverbose\n"
2406         "# echo print-parent > /debug/tracing/iter_ctrl\n"
2407         "# echo 1 > /debug/tracing/tracing_enabled\n"
2408         "# cat /debug/tracing/trace > /tmp/trace.txt\n"
2409         "# echo 0 > /debug/tracing/tracing_enabled\n"
2410 ;
2411
2412 static ssize_t
2413 tracing_readme_read(struct file *filp, char __user *ubuf,
2414                        size_t cnt, loff_t *ppos)
2415 {
2416         return simple_read_from_buffer(ubuf, cnt, ppos,
2417                                         readme_msg, strlen(readme_msg));
2418 }
2419
2420 static struct file_operations tracing_readme_fops = {
2421         .open           = tracing_open_generic,
2422         .read           = tracing_readme_read,
2423 };
2424
2425 static ssize_t
2426 tracing_ctrl_read(struct file *filp, char __user *ubuf,
2427                   size_t cnt, loff_t *ppos)
2428 {
2429         struct trace_array *tr = filp->private_data;
2430         char buf[64];
2431         int r;
2432
2433         r = sprintf(buf, "%ld\n", tr->ctrl);
2434         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2435 }
2436
2437 static ssize_t
2438 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2439                    size_t cnt, loff_t *ppos)
2440 {
2441         struct trace_array *tr = filp->private_data;
2442         char buf[64];
2443         long val;
2444         int ret;
2445
2446         if (cnt >= sizeof(buf))
2447                 return -EINVAL;
2448
2449         if (copy_from_user(&buf, ubuf, cnt))
2450                 return -EFAULT;
2451
2452         buf[cnt] = 0;
2453
2454         ret = strict_strtoul(buf, 10, &val);
2455         if (ret < 0)
2456                 return ret;
2457
2458         val = !!val;
2459
2460         mutex_lock(&trace_types_lock);
2461         if (tr->ctrl ^ val) {
2462                 if (val)
2463                         tracer_enabled = 1;
2464                 else
2465                         tracer_enabled = 0;
2466
2467                 tr->ctrl = val;
2468
2469                 if (current_trace && current_trace->ctrl_update)
2470                         current_trace->ctrl_update(tr);
2471         }
2472         mutex_unlock(&trace_types_lock);
2473
2474         filp->f_pos += cnt;
2475
2476         return cnt;
2477 }
2478
2479 static ssize_t
2480 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2481                        size_t cnt, loff_t *ppos)
2482 {
2483         char buf[max_tracer_type_len+2];
2484         int r;
2485
2486         mutex_lock(&trace_types_lock);
2487         if (current_trace)
2488                 r = sprintf(buf, "%s\n", current_trace->name);
2489         else
2490                 r = sprintf(buf, "\n");
2491         mutex_unlock(&trace_types_lock);
2492
2493         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2494 }
2495
2496 static ssize_t
2497 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2498                         size_t cnt, loff_t *ppos)
2499 {
2500         struct trace_array *tr = &global_trace;
2501         struct tracer *t;
2502         char buf[max_tracer_type_len+1];
2503         int i;
2504
2505         if (cnt > max_tracer_type_len)
2506                 cnt = max_tracer_type_len;
2507
2508         if (copy_from_user(&buf, ubuf, cnt))
2509                 return -EFAULT;
2510
2511         buf[cnt] = 0;
2512
2513         /* strip trailing whitespace. */
2514         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2515                 buf[i] = 0;
2516
2517         mutex_lock(&trace_types_lock);
2518         for (t = trace_types; t; t = t->next) {
2519                 if (strcmp(t->name, buf) == 0)
2520                         break;
2521         }
2522         if (!t || t == current_trace)
2523                 goto out;
2524
2525         if (current_trace && current_trace->reset)
2526                 current_trace->reset(tr);
2527
2528         current_trace = t;
2529         if (t->init)
2530                 t->init(tr);
2531
2532  out:
2533         mutex_unlock(&trace_types_lock);
2534
2535         filp->f_pos += cnt;
2536
2537         return cnt;
2538 }
2539
2540 static ssize_t
2541 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2542                      size_t cnt, loff_t *ppos)
2543 {
2544         unsigned long *ptr = filp->private_data;
2545         char buf[64];
2546         int r;
2547
2548         r = snprintf(buf, sizeof(buf), "%ld\n",
2549                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2550         if (r > sizeof(buf))
2551                 r = sizeof(buf);
2552         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2553 }
2554
2555 static ssize_t
2556 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2557                       size_t cnt, loff_t *ppos)
2558 {
2559         long *ptr = filp->private_data;
2560         char buf[64];
2561         long val;
2562         int ret;
2563
2564         if (cnt >= sizeof(buf))
2565                 return -EINVAL;
2566
2567         if (copy_from_user(&buf, ubuf, cnt))
2568                 return -EFAULT;
2569
2570         buf[cnt] = 0;
2571
2572         ret = strict_strtoul(buf, 10, &val);
2573         if (ret < 0)
2574                 return ret;
2575
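        /* the file takes microseconds; the value itself is kept in nanoseconds */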
2576         *ptr = val * 1000;
2577
2578         return cnt;
2579 }
2580
2581 static atomic_t tracing_reader;
2582
2583 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2584 {
2585         struct trace_iterator *iter;
2586
2587         if (tracing_disabled)
2588                 return -ENODEV;
2589
2590         /* We only allow one reader of the pipe */
2591         if (atomic_inc_return(&tracing_reader) != 1) {
2592                 atomic_dec(&tracing_reader);
2593                 return -EBUSY;
2594         }
2595
2596         /* create a buffer to store the information to pass to userspace */
2597         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2598         if (!iter)
2599                 return -ENOMEM;
2600
2601         mutex_lock(&trace_types_lock);
2602         iter->tr = &global_trace;
2603         iter->trace = current_trace;
2604         filp->private_data = iter;
2605
2606         if (iter->trace->pipe_open)
2607                 iter->trace->pipe_open(iter);
2608         mutex_unlock(&trace_types_lock);
2609
2610         return 0;
2611 }
2612
2613 static int tracing_release_pipe(struct inode *inode, struct file *file)
2614 {
2615         struct trace_iterator *iter = file->private_data;
2616
2617         kfree(iter);
2618         atomic_dec(&tracing_reader);
2619
2620         return 0;
2621 }
2622
2623 static unsigned int
2624 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2625 {
2626         struct trace_iterator *iter = filp->private_data;
2627
2628         if (trace_flags & TRACE_ITER_BLOCK) {
2629                 /*
2630                  * Always select as readable when in blocking mode
2631                  */
2632                 return POLLIN | POLLRDNORM;
2633         } else {
2634                 if (!trace_empty(iter))
2635                         return POLLIN | POLLRDNORM;
2636                 poll_wait(filp, &trace_wait, poll_table);
2637                 if (!trace_empty(iter))
2638                         return POLLIN | POLLRDNORM;
2639
2640                 return 0;
2641         }
2642 }
2643
2644 /*
2645  * Consumer reader.
2646  */
2647 static ssize_t
2648 tracing_read_pipe(struct file *filp, char __user *ubuf,
2649                   size_t cnt, loff_t *ppos)
2650 {
2651         struct trace_iterator *iter = filp->private_data;
2652         struct trace_array_cpu *data;
2653         static cpumask_t mask;
2654         unsigned long flags;
2655 #ifdef CONFIG_FTRACE
2656         int ftrace_save;
2657 #endif
2658         int cpu;
2659         ssize_t sret;
2660
2661         /* return any leftover data */
2662         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2663         if (sret != -EBUSY)
2664                 return sret;
2665         sret = 0;
2666
2667         trace_seq_reset(&iter->seq);
2668
2669         mutex_lock(&trace_types_lock);
2670         if (iter->trace->read) {
2671                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
2672                 if (sret)
2673                         goto out;
2674         }
2675
2676         while (trace_empty(iter)) {
2677
2678                 if ((filp->f_flags & O_NONBLOCK)) {
2679                         sret = -EAGAIN;
2680                         goto out;
2681                 }
2682
2683                 /*
2684                  * This is a makeshift waitqueue. The reason we don't use
2685                  * an actual wait queue is because:
2686                  *  1) we only ever have one waiter
2687                  *  2) the tracer traces all functions, and we don't want
2688                  *     the overhead of calling wake_up and friends
2689                  *     (and of tracing them too)
2690                  * Anyway, this is a very primitive wakeup.
2691                  */
2692                 set_current_state(TASK_INTERRUPTIBLE);
2693                 iter->tr->waiter = current;
2694
2695                 mutex_unlock(&trace_types_lock);
2696
2697                 /* sleep for 100 msecs, and try again. */
2698                 schedule_timeout(HZ/10);
2699
2700                 mutex_lock(&trace_types_lock);
2701
2702                 iter->tr->waiter = NULL;
2703
2704                 if (signal_pending(current)) {
2705                         sret = -EINTR;
2706                         goto out;
2707                 }
2708
2709                 if (iter->trace != current_trace)
2710                         goto out;
2711
2712                 /*
2713                  * We block until we read something. We still block if
2714                  * tracing is disabled, but only if we have never read
2715                  * anything. This allows a user to cat this file, and
2716                  * then enable tracing. But once we have read something,
2717                  * we give an EOF when tracing is disabled again.
2718                  *
2719                  * iter->pos will be 0 if we haven't read anything.
2720                  */
2721                 if (!tracer_enabled && iter->pos)
2722                         break;
2723
2724                 continue;
2725         }
2726
2727         /* stop when tracing is finished */
2728         if (trace_empty(iter))
2729                 goto out;
2730
2731         if (cnt >= PAGE_SIZE)
2732                 cnt = PAGE_SIZE - 1;
2733
2734         /* reset all but tr, trace, and overruns */
2735         memset(&iter->seq, 0,
2736                sizeof(struct trace_iterator) -
2737                offsetof(struct trace_iterator, seq));
2738         iter->pos = -1;
2739
2740         /*
2741          * We need to stop all tracing on all CPUS to read
2742          * the next buffer. This is a bit expensive, but is
2743          * not done often. We fill in all that we can read,
2744          * and then release the locks again.
2745          */
2746
2747         cpus_clear(mask);
2748         local_irq_save(flags);
2749 #ifdef CONFIG_FTRACE
2750         ftrace_save = ftrace_enabled;
2751         ftrace_enabled = 0;
2752 #endif
2753         smp_wmb();
2754         for_each_tracing_cpu(cpu) {
2755                 data = iter->tr->data[cpu];
2756
2757                 if (!head_page(data) || !data->trace_idx)
2758                         continue;
2759
2760                 atomic_inc(&data->disabled);
2761                 cpu_set(cpu, mask);
2762         }
2763
2764         for_each_cpu_mask(cpu, mask) {
2765                 data = iter->tr->data[cpu];
2766                 __raw_spin_lock(&data->lock);
2767
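                /* account for entries lost to ring-buffer overruns since the last read */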
2768                 if (data->overrun > iter->last_overrun[cpu])
2769                         iter->overrun[cpu] +=
2770                                 data->overrun - iter->last_overrun[cpu];
2771                 iter->last_overrun[cpu] = data->overrun;
2772         }
2773
2774         while (find_next_entry_inc(iter) != NULL) {
2775                 int ret;
2776                 int len = iter->seq.len;
2777
2778                 ret = print_trace_line(iter);
2779                 if (!ret) {
2780                         /* don't print partial lines */
2781                         iter->seq.len = len;
2782                         break;
2783                 }
2784
2785                 trace_consume(iter);
2786
2787                 if (iter->seq.len >= cnt)
2788                         break;
2789         }
2790
2791         for_each_cpu_mask(cpu, mask) {
2792                 data = iter->tr->data[cpu];
2793                 __raw_spin_unlock(&data->lock);
2794         }
2795
2796         for_each_cpu_mask(cpu, mask) {
2797                 data = iter->tr->data[cpu];
2798                 atomic_dec(&data->disabled);
2799         }
2800 #ifdef CONFIG_FTRACE
2801         ftrace_enabled = ftrace_save;
2802 #endif
2803         local_irq_restore(flags);
2804
2805         /* Now copy what we have to the user */
2806         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2807         if (iter->seq.readpos >= iter->seq.len)
2808                 trace_seq_reset(&iter->seq);
2809         if (sret == -EBUSY)
2810                 sret = 0;
2811
2812 out:
2813         mutex_unlock(&trace_types_lock);
2814
2815         return sret;
2816 }
2817
2818 static ssize_t
2819 tracing_entries_read(struct file *filp, char __user *ubuf,
2820                      size_t cnt, loff_t *ppos)
2821 {
2822         struct trace_array *tr = filp->private_data;
2823         char buf[64];
2824         int r;
2825
2826         r = sprintf(buf, "%lu\n", tr->entries);
2827         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2828 }
2829
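/*
 * Example (a sketch): growing the per-CPU buffers to 65536 entries.
 * The current tracer must be set to none first, as enforced below:
 *
 *   # echo none > /debug/tracing/current_tracer
 *   # echo 65536 > /debug/tracing/trace_entries
 */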
2830 static ssize_t
2831 tracing_entries_write(struct file *filp, const char __user *ubuf,
2832                       size_t cnt, loff_t *ppos)
2833 {
2834         unsigned long val;
2835         char buf[64];
2836         int i, ret;
2837
2838         if (cnt >= sizeof(buf))
2839                 return -EINVAL;
2840
2841         if (copy_from_user(&buf, ubuf, cnt))
2842                 return -EFAULT;
2843
2844         buf[cnt] = 0;
2845
2846         ret = strict_strtoul(buf, 10, &val);
2847         if (ret < 0)
2848                 return ret;
2849
2850         /* must have at least 1 entry */
2851         if (!val)
2852                 return -EINVAL;
2853
2854         mutex_lock(&trace_types_lock);
2855
2856         if (current_trace != &no_tracer) {
2857                 cnt = -EBUSY;
2858                 pr_info("ftrace: set current_tracer to none"
2859                         " before modifying buffer size\n");
2860                 goto out;
2861         }
2862
2863         if (val > global_trace.entries) {
2864                 long pages_requested;
2865                 unsigned long freeable_pages;
2866
2867                 /* make sure we have enough memory before mapping */
2868                 pages_requested =
2869                         (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2870
2871                 /* account for each buffer (and max_tr) */
2872                 pages_requested *= tracing_nr_buffers * 2;
2873
2874                 /* Check for overflow */
2875                 if (pages_requested < 0) {
2876                         cnt = -ENOMEM;
2877                         goto out;
2878                 }
2879
2880                 freeable_pages = determine_dirtyable_memory();
2881
2882                 /* we only allow requests of up to 1/4 of usable memory */
2883                 if (pages_requested >
2884                     ((freeable_pages + tracing_pages_allocated) / 4)) {
2885                         cnt = -ENOMEM;
2886                         goto out;
2887                 }
2888
2889                 while (global_trace.entries < val) {
2890                         if (trace_alloc_page()) {
2891                                 cnt = -ENOMEM;
2892                                 goto out;
2893                         }
2894                         /* double check that we don't go over the known pages */
2895                         if (tracing_pages_allocated > pages_requested)
2896                                 break;
2897                 }
2898
2899         } else {
2900                 /* shrink, but keep at least val entries (rounded up to whole pages) */
2901                 while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2902                         trace_free_page();
2903         }
2904
2905         /* check integrity */
2906         for_each_tracing_cpu(i)
2907                 check_pages(global_trace.data[i]);
2908
2909         filp->f_pos += cnt;
2910
2911         /* If check pages failed, return ENOMEM */
2912         if (tracing_disabled)
2913                 cnt = -ENOMEM;
2914  out:
2915         max_tr.entries = global_trace.entries;
2916         mutex_unlock(&trace_types_lock);
2917
2918         return cnt;
2919 }
2920
2921 static struct file_operations tracing_max_lat_fops = {
2922         .open           = tracing_open_generic,
2923         .read           = tracing_max_lat_read,
2924         .write          = tracing_max_lat_write,
2925 };
2926
2927 static struct file_operations tracing_ctrl_fops = {
2928         .open           = tracing_open_generic,
2929         .read           = tracing_ctrl_read,
2930         .write          = tracing_ctrl_write,
2931 };
2932
2933 static struct file_operations set_tracer_fops = {
2934         .open           = tracing_open_generic,
2935         .read           = tracing_set_trace_read,
2936         .write          = tracing_set_trace_write,
2937 };
2938
2939 static struct file_operations tracing_pipe_fops = {
2940         .open           = tracing_open_pipe,
2941         .poll           = tracing_poll_pipe,
2942         .read           = tracing_read_pipe,
2943         .release        = tracing_release_pipe,
2944 };
2945
2946 static struct file_operations tracing_entries_fops = {
2947         .open           = tracing_open_generic,
2948         .read           = tracing_entries_read,
2949         .write          = tracing_entries_write,
2950 };
2951
2952 #ifdef CONFIG_DYNAMIC_FTRACE
2953
2954 static ssize_t
2955 tracing_read_long(struct file *filp, char __user *ubuf,
2956                   size_t cnt, loff_t *ppos)
2957 {
2958         unsigned long *p = filp->private_data;
2959         char buf[64];
2960         int r;
2961
2962         r = sprintf(buf, "%ld\n", *p);
2963
2964         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2965 }
2966
2967 static struct file_operations tracing_read_long_fops = {
2968         .open           = tracing_open_generic,
2969         .read           = tracing_read_long,
2970 };
2971 #endif
2972
2973 static struct dentry *d_tracer;
2974
2975 struct dentry *tracing_init_dentry(void)
2976 {
2977         static int once;
2978
2979         if (d_tracer)
2980                 return d_tracer;
2981
2982         d_tracer = debugfs_create_dir("tracing", NULL);
2983
2984         if (!d_tracer && !once) {
2985                 once = 1;
2986                 pr_warning("Could not create debugfs directory 'tracing'\n");
2987                 return NULL;
2988         }
2989
2990         return d_tracer;
2991 }
2992
2993 #ifdef CONFIG_FTRACE_SELFTEST
2994 /* Let selftest have access to static functions in this file */
2995 #include "trace_selftest.c"
2996 #endif
2997
2998 static __init void tracer_init_debugfs(void)
2999 {
3000         struct dentry *d_tracer;
3001         struct dentry *entry;
3002
3003         d_tracer = tracing_init_dentry();
3004
3005         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
3006                                     &global_trace, &tracing_ctrl_fops);
3007         if (!entry)
3008                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
3009
3010         entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
3011                                     NULL, &tracing_iter_fops);
3012         if (!entry)
3013                 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
3014
3015         entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
3016                                     NULL, &tracing_cpumask_fops);
3017         if (!entry)
3018                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
3019
3020         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
3021                                     &global_trace, &tracing_lt_fops);
3022         if (!entry)
3023                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
3024
3025         entry = debugfs_create_file("trace", 0444, d_tracer,
3026                                     &global_trace, &tracing_fops);
3027         if (!entry)
3028                 pr_warning("Could not create debugfs 'trace' entry\n");
3029
3030         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
3031                                     &global_trace, &show_traces_fops);
3032         if (!entry)
3033                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
3034
3035         entry = debugfs_create_file("current_tracer", 0444, d_tracer,
3036                                     &global_trace, &set_tracer_fops);
3037         if (!entry)
3038                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
3039
3040         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
3041                                     &tracing_max_latency,
3042                                     &tracing_max_lat_fops);
3043         if (!entry)
3044                 pr_warning("Could not create debugfs "
3045                            "'tracing_max_latency' entry\n");
3046
3047         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
3048                                     &tracing_thresh, &tracing_max_lat_fops);
3049         if (!entry)
3050                 pr_warning("Could not create debugfs "
3051                            "'tracing_thresh' entry\n");
3052         entry = debugfs_create_file("README", 0644, d_tracer,
3053                                     NULL, &tracing_readme_fops);
3054         if (!entry)
3055                 pr_warning("Could not create debugfs 'README' entry\n");
3056
3057         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
3058                                     NULL, &tracing_pipe_fops);
3059         if (!entry)
3060                 pr_warning("Could not create debugfs "
3061                            "'trace_pipe' entry\n");
3062
3063         entry = debugfs_create_file("trace_entries", 0644, d_tracer,
3064                                     &global_trace, &tracing_entries_fops);
3065         if (!entry)
3066                 pr_warning("Could not create debugfs "
3067                            "'trace_entries' entry\n");
3068
3069 #ifdef CONFIG_DYNAMIC_FTRACE
3070         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
3071                                     &ftrace_update_tot_cnt,
3072                                     &tracing_read_long_fops);
3073         if (!entry)
3074                 pr_warning("Could not create debugfs "
3075                            "'dyn_ftrace_total_info' entry\n");
3076 #endif
3077 #ifdef CONFIG_SYSPROF_TRACER
3078         init_tracer_sysprof_debugfs(d_tracer);
3079 #endif
3080 }
3081
3082 #define TRACE_BUF_SIZE 1024
3083 #define TRACE_PRINT_BUF_SIZE \
3084         (sizeof(struct trace_field) - offsetof(struct trace_field, print.buf))
3085 #define TRACE_CONT_BUF_SIZE sizeof(struct trace_field)
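
/*
 * A formatted string that does not fit in one TRACE_PRINT entry is
 * split: the first entry is flagged TRACE_FLAG_CONT and the remainder
 * spills into TRACE_CONT entries, which the printers above reassemble
 * via trace_seq_print_cont().
 */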
3086
3087 int __ftrace_printk(unsigned long ip, const char *fmt, ...)
3088 {
3089         struct trace_array *tr = &global_trace;
3090         static DEFINE_SPINLOCK(trace_buf_lock);
3091         static char trace_buf[TRACE_BUF_SIZE];
3092         struct trace_array_cpu *data;
3093         struct trace_entry *entry;
3094         unsigned long flags;
3095         long disabled;
3096         va_list ap;
3097         int cpu, len = 0, write, written = 0;
3098
3099         if (likely(!ftrace_function_enabled))
3100                 return 0;
3101
3102         local_irq_save(flags);
3103         cpu = raw_smp_processor_id();
3104         data = tr->data[cpu];
3105         disabled = atomic_inc_return(&data->disabled);
3106
3107         if (unlikely(disabled != 1 || !ftrace_function_enabled))
3108                 goto out;
3109
3110         spin_lock(&trace_buf_lock);
3111         va_start(ap, fmt);
3112         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, ap);
3113         va_end(ap);
3114
3115         len = min(len, TRACE_BUF_SIZE-1);
3116         trace_buf[len] = 0;
3117
3118         __raw_spin_lock(&data->lock);
3119         entry                           = tracing_get_trace_entry(tr, data);
3120         tracing_generic_entry_update(entry, flags);
3121         entry->type                     = TRACE_PRINT;
3122         entry->field.print.ip           = ip;
3123
3124         write = min(len, (int)(TRACE_PRINT_BUF_SIZE-1));
3125
3126         memcpy(&entry->field.print.buf, trace_buf, write);
3127         entry->field.print.buf[write] = 0;
3128         written = write;
3129
3130         if (written != len)
3131                 entry->field.flags |= TRACE_FLAG_CONT;
3132
3133         while (written != len) {
3134                 entry = tracing_get_trace_entry(tr, data);
3135
3136                 entry->type = TRACE_CONT;
3137                 write = min(len - written, (int)(TRACE_CONT_BUF_SIZE-1));
3138                 memcpy(&entry->cont.buf, trace_buf+written, write);
3139                 entry->cont.buf[write] = 0;
3140                 written += write;
3141         }
3142         __raw_spin_unlock(&data->lock);
3143
3144         spin_unlock(&trace_buf_lock);
3145
3146  out:
3147         atomic_dec(&data->disabled);
3148         local_irq_restore(flags);
3149
3150         return len;
3151 }
3152 EXPORT_SYMBOL_GPL(__ftrace_printk);
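
/*
 * Example (a sketch; assumes the ftrace_printk() wrapper from
 * <linux/ftrace.h>, which supplies the caller's ip):
 *
 *	ftrace_printk("processed %d entries on cpu %d\n", cnt, cpu);
 *
 * The string shows up in the trace as a TRACE_PRINT entry, printed
 * after the symbol of the calling function.
 */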
3153
3154 static int trace_panic_handler(struct notifier_block *this,
3155                                unsigned long event, void *unused)
3156 {
3157         ftrace_dump();
3158         return NOTIFY_OK;
3159 }
3160
3161 static struct notifier_block trace_panic_notifier = {
3162         .notifier_call  = trace_panic_handler,
3163         .next           = NULL,
3164         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
3165 };
3166
3167 static int trace_die_handler(struct notifier_block *self,
3168                              unsigned long val,
3169                              void *data)
3170 {
3171         switch (val) {
3172         case DIE_OOPS:
3173                 ftrace_dump();
3174                 break;
3175         default:
3176                 break;
3177         }
3178         return NOTIFY_OK;
3179 }
3180
3181 static struct notifier_block trace_die_notifier = {
3182         .notifier_call = trace_die_handler,
3183         .priority = 200
3184 };
3185
3186 /*
3187  * printk is set to a max of 1024, but we really don't need it that big.
3188  * Nothing should be printing 1000 characters anyway.
3189  */
3190 #define TRACE_MAX_PRINT         1000
3191
3192 /*
3193  * Define here KERN_TRACE so that we have one place to modify
3194  * it if we decide to change what log level the ftrace dump
3195  * should be at.
3196  */
3197 #define KERN_TRACE              KERN_INFO
3198
3199 static void
3200 trace_printk_seq(struct trace_seq *s)
3201 {
3202         /* Probably should print a warning here. */
3203         if (s->len >= 1000)
3204                 s->len = 1000;
3205
3206         /* the string should be zero terminated, but we are paranoid. */
3207         s->buffer[s->len] = 0;
3208
3209         printk(KERN_TRACE "%s", s->buffer);
3210
3211         trace_seq_reset(s);
3212 }
3213
3214
3215 void ftrace_dump(void)
3216 {
3217         static DEFINE_SPINLOCK(ftrace_dump_lock);
3218         /* use static because iter can be a bit big for the stack */
3219         static struct trace_iterator iter;
3220         struct trace_array_cpu *data;
3221         static cpumask_t mask;
3222         static int dump_ran;
3223         unsigned long flags;
3224         int cnt = 0;
3225         int cpu;
3226
3227         /* only one dump */
3228         spin_lock_irqsave(&ftrace_dump_lock, flags);
3229         if (dump_ran)
3230                 goto out;
3231
3232         dump_ran = 1;
3233
3234         /* No turning back! */
3235         ftrace_kill_atomic();
3236
3237         printk(KERN_TRACE "Dumping ftrace buffer:\n");
3238
3239         iter.tr = &global_trace;
3240         iter.trace = current_trace;
3241
3242         /*
3243          * We need to stop all tracing on all CPUS to read
3244          * the next buffer. This is a bit expensive, but is
3245          * not done often. We fill in all that we can read,
3246          * and then release the locks again.
3247          */
3248
3249         cpus_clear(mask);
3250
3251         for_each_tracing_cpu(cpu) {
3252                 data = iter.tr->data[cpu];
3253
3254                 if (!head_page(data) || !data->trace_idx)
3255                         continue;
3256
3257                 atomic_inc(&data->disabled);
3258                 cpu_set(cpu, mask);
3259         }
3260
3261         for_each_cpu_mask(cpu, mask) {
3262                 data = iter.tr->data[cpu];
3263                 __raw_spin_lock(&data->lock);
3264
3265                 if (data->overrun > iter.last_overrun[cpu])
3266                         iter.overrun[cpu] +=
3267                                 data->overrun - iter.last_overrun[cpu];
3268                 iter.last_overrun[cpu] = data->overrun;
3269         }
3270
3271         while (!trace_empty(&iter)) {
3272
3273                 if (!cnt)
3274                         printk(KERN_TRACE "---------------------------------\n");
3275
3276                 cnt++;
3277
3278                 /* reset all but tr, trace, and overruns */
3279                 memset(&iter.seq, 0,
3280                        sizeof(struct trace_iterator) -
3281                        offsetof(struct trace_iterator, seq));
3282                 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3283                 iter.pos = -1;
3284
3285                 if (find_next_entry_inc(&iter) != NULL) {
3286                         print_trace_line(&iter);
3287                         trace_consume(&iter);
3288                 }
3289
3290                 trace_printk_seq(&iter.seq);
3291         }
3292
3293         if (!cnt)
3294                 printk(KERN_TRACE "   (ftrace buffer empty)\n");
3295         else
3296                 printk(KERN_TRACE "---------------------------------\n");
3297
3298         for_each_cpu_mask(cpu, mask) {
3299                 data = iter.tr->data[cpu];
3300                 __raw_spin_unlock(&data->lock);
3301         }
3302
3303         for_each_cpu_mask(cpu, mask) {
3304                 data = iter.tr->data[cpu];
3305                 atomic_dec(&data->disabled);
3306         }
3307
3309  out:
3310         spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3311 }
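
ftrace_dump() keeps its iterator static because it is too large for an
oops-time stack, and the static dump_ran flag under ftrace_dump_lock
guarantees exactly one dump even if several CPUs crash into this path
at once. That run-once-under-lock idiom, isolated into a hedged sketch
(dump_once() and do_expensive_dump() are invented names):

	#include <linux/kernel.h>
	#include <linux/spinlock.h>

	/* hypothetical stand-in for the real, expensive work */
	static void do_expensive_dump(void)
	{
		printk(KERN_INFO "dumping state...\n");
	}

	/* run the dump at most once, no matter how many callers race */
	static void dump_once(void)
	{
		static DEFINE_SPINLOCK(once_lock);
		static int already_ran;
		unsigned long flags;

		spin_lock_irqsave(&once_lock, flags);
		if (already_ran)
			goto out;	/* another CPU got here first */
		already_ran = 1;

		do_expensive_dump();
	 out:
		spin_unlock_irqrestore(&once_lock, flags);
	}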
3312
3313 static int trace_alloc_page(void)
3314 {
3315         struct trace_array_cpu *data;
3316         struct page *page, *tmp;
3317         LIST_HEAD(pages);
3318         void *array;
3319         unsigned pages_allocated = 0;
3320         int i;
3321
3322         /* first allocate a page for each CPU */
3323         for_each_tracing_cpu(i) {
3324                 array = (void *)__get_free_page(GFP_KERNEL);
3325                 if (array == NULL) {
3326                         printk(KERN_ERR "tracer: failed to allocate page "
3327                                "for trace buffer!\n");
3328                         goto free_pages;
3329                 }
3330
3331                 pages_allocated++;
3332                 page = virt_to_page(array);
3333                 list_add(&page->lru, &pages);
3334
3335 /* Only allocate if we are actually using the max trace */
3336 #ifdef CONFIG_TRACER_MAX_TRACE
3337                 array = (void *)__get_free_page(GFP_KERNEL);
3338                 if (array == NULL) {
3339                         printk(KERN_ERR "tracer: failed to allocate page "
3340                                "for trace buffer!\n");
3341                         goto free_pages;
3342                 }
3343                 pages_allocated++;
3344                 page = virt_to_page(array);
3345                 list_add(&page->lru, &pages);
3346 #endif
3347         }
3348
3349         /* Now that we have successfully allocated a page per CPU, add them */
3350         for_each_tracing_cpu(i) {
3351                 data = global_trace.data[i];
3352                 page = list_entry(pages.next, struct page, lru);
3353                 list_del_init(&page->lru);
3354                 list_add_tail(&page->lru, &data->trace_pages);
3355                 ClearPageLRU(page);
3356
3357 #ifdef CONFIG_TRACER_MAX_TRACE
3358                 data = max_tr.data[i];
3359                 page = list_entry(pages.next, struct page, lru);
3360                 list_del_init(&page->lru);
3361                 list_add_tail(&page->lru, &data->trace_pages);
3362                 SetPageLRU(page);
3363 #endif
3364         }
3365         tracing_pages_allocated += pages_allocated;
3366         global_trace.entries += ENTRIES_PER_PAGE;
3367
3368         return 0;
3369
3370  free_pages:
3371         list_for_each_entry_safe(page, tmp, &pages, lru) {
3372                 list_del_init(&page->lru);
3373                 __free_page(page);
3374         }
3375         return -ENOMEM;
3376 }
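
trace_alloc_page() is deliberately two-phase: the new pages (one per
CPU, two with CONFIG_TRACER_MAX_TRACE) are first staged on a private
list, and only after every allocation has succeeded are they moved
onto the per-CPU buffers; any failure tears down the staging list and
publishes nothing. The same commit-or-rollback idiom reduced to a
skeleton (grow_buffers() and its parameters are hypothetical):

	#include <linux/gfp.h>
	#include <linux/list.h>
	#include <linux/mm.h>

	static int grow_buffers(struct list_head *cpu_lists, int ncpus)
	{
		LIST_HEAD(staging);		/* private, not yet visible */
		struct page *page, *tmp;
		int i;

		/* phase 1: stage one page per CPU, fail without side effects */
		for (i = 0; i < ncpus; i++) {
			void *array = (void *)__get_free_page(GFP_KERNEL);

			if (!array)
				goto rollback;
			list_add(&virt_to_page(array)->lru, &staging);
		}

		/* phase 2: every allocation succeeded, publish the pages */
		for (i = 0; i < ncpus; i++) {
			page = list_entry(staging.next, struct page, lru);
			list_del_init(&page->lru);
			list_add_tail(&page->lru, &cpu_lists[i]);
		}
		return 0;

	 rollback:
		list_for_each_entry_safe(page, tmp, &staging, lru) {
			list_del_init(&page->lru);
			__free_page(page);
		}
		return -ENOMEM;
	}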
3377
3378 static int trace_free_page(void)
3379 {
3380         struct trace_array_cpu *data;
3381         struct page *page;
3382         struct list_head *p;
3383         int i;
3384         int ret = 0;
3385
3386         /* free one page from each buffer */
3387         for_each_tracing_cpu(i) {
3388                 data = global_trace.data[i];
3389                 p = data->trace_pages.next;
3390                 if (p == &data->trace_pages) {
3391                         /* should never happen */
3392                         WARN_ON(1);
3393                         tracing_disabled = 1;
3394                         ret = -1;
3395                         break;
3396                 }
3397                 page = list_entry(p, struct page, lru);
3398                 ClearPageLRU(page);
3399                 list_del(&page->lru);
3400                 tracing_pages_allocated--;
3401                 tracing_pages_allocated--; /* for the max_tr page freed below */
3402                 __free_page(page);
3403
3404                 tracing_reset(data);
3405
3406 #ifdef CONFIG_TRACER_MAX_TRACE
3407                 data = max_tr.data[i];
3408                 p = data->trace_pages.next;
3409                 if (p == &data->trace_pages) {
3410                         /* should never happen */
3411                         WARN_ON(1);
3412                         tracing_disabled = 1;
3413                         ret = -1;
3414                         break;
3415                 }
3416                 page = list_entry(p, struct page, lru);
3417                 ClearPageLRU(page);
3418                 list_del(&page->lru);
3419                 __free_page(page);
3420
3421                 tracing_reset(data);
3422 #endif
3423         }
3424         global_trace.entries -= ENTRIES_PER_PAGE;
3425
3426         return ret;
3427 }
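
Together, trace_alloc_page() and trace_free_page() grow or shrink every
CPU's buffer by exactly one page (ENTRIES_PER_PAGE entries) per call,
so all CPUs stay the same size. Resizing to an arbitrary target is then
just a loop; a sketch of a caller (resize_to() is an invented name; in
this file the debugfs trace_entries handler does something similar):

	/* hypothetical: step the buffers toward a requested entry count;
	 * sizes move in ENTRIES_PER_PAGE steps, so the result rounds */
	static int resize_to(unsigned long want_entries)
	{
		while (global_trace.entries < want_entries) {
			if (trace_alloc_page())
				return -ENOMEM;	/* partial growth was rolled back */
		}

		while (global_trace.entries > want_entries) {
			if (trace_free_page())
				return -1;
		}
		return 0;
	}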
3428
3429 __init static int tracer_alloc_buffers(void)
3430 {
3431         struct trace_array_cpu *data;
3432         void *array;
3433         struct page *page;
3434         int pages = 0;
3435         int ret = -ENOMEM;
3436         int i;
3437
3438         /* TODO: make the number of buffers hotpluggable with CPUs */
3439         tracing_nr_buffers = num_possible_cpus();
3440         tracing_buffer_mask = cpu_possible_map;
3441
3442         /* Allocate the first page for all buffers */
3443         for_each_tracing_cpu(i) {
3444                 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3445                 max_tr.data[i] = &per_cpu(max_data, i);
3446
3447                 array = (void *)__get_free_page(GFP_KERNEL);
3448                 if (array == NULL) {
3449                         printk(KERN_ERR "tracer: failed to allocate page "
3450                                "for trace buffer!\n");
3451                         goto free_buffers;
3452                 }
3453
3454                 /* link the page backing this array into the CPU's buffer list */
3455                 INIT_LIST_HEAD(&data->trace_pages);
3456                 page = virt_to_page(array);
3457                 list_add(&page->lru, &data->trace_pages);
3458                 /* use the LRU flag to differentiate the two buffers */
3459                 ClearPageLRU(page);
3460
3461                 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3462                 max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3463
3464 /* Only allocate if we are actually using the max trace */
3465 #ifdef CONFIG_TRACER_MAX_TRACE
3466                 array = (void *)__get_free_page(GFP_KERNEL);
3467                 if (array == NULL) {
3468                         printk(KERN_ERR "tracer: failed to allocate page "
3469                                "for trace buffer!\n");
3470                         goto free_buffers;
3471                 }
3472
3473                 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
3474                 page = virt_to_page(array);
3475                 list_add(&page->lru, &max_tr.data[i]->trace_pages);
3476                 SetPageLRU(page);
3477 #endif
3478         }
3479
3480         /*
3481          * Since we allocate whole pages at a time, the actual
3482          * entry count may round up a bit past what was requested.
3483          */
3484         global_trace.entries = ENTRIES_PER_PAGE;
3485         pages++;
3486
3487         while (global_trace.entries < trace_nr_entries) {
3488                 if (trace_alloc_page())
3489                         break;
3490                 pages++;
3491         }
3492         max_tr.entries = global_trace.entries;
3493
3494         pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n",
3495                 pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE);
3496         pr_info("   actual entries %ld\n", global_trace.entries);
3497
3498         tracer_init_debugfs();
3499
3500         trace_init_cmdlines();
3501
3502         register_tracer(&no_tracer);
3503         current_trace = &no_tracer;
3504
3505         /* All seems OK, enable tracing */
3506         global_trace.ctrl = tracer_enabled;
3507         tracing_disabled = 0;
3508
3509         atomic_notifier_chain_register(&panic_notifier_list,
3510                                        &trace_panic_notifier);
3511
3512         register_die_notifier(&trace_die_notifier);
3513
3514         return 0;
3515
3516  free_buffers:
3517         for (i-- ; i >= 0; i--) {
3518                 struct page *page, *tmp;
3519                 struct trace_array_cpu *data = global_trace.data[i];
3520
3521                 if (data) {
3522                         list_for_each_entry_safe(page, tmp,
3523                                                  &data->trace_pages, lru) {
3524                                 list_del_init(&page->lru);
3525                                 __free_page(page);
3526                         }
3527                 }
3528
3529 #ifdef CONFIG_TRACER_MAX_TRACE
3530                 data = max_tr.data[i];
3531                 if (data) {
3532                         list_for_each_entry_safe(page, tmp,
3533                                                  &data->trace_pages, lru) {
3534                                 list_del_init(&page->lru);
3535                                 __free_page(page);
3536                         }
3537                 }
3538 #endif
3539         }
3540         return ret;
3541 }
3542 fs_initcall(tracer_alloc_buffers);
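
tracer_alloc_buffers() runs as an fs_initcall(), i.e. at initcall
level 5: after the core, postcore, arch, and subsys initcalls, but
before device and late initcalls, so the buffers exist before most
drivers start generating events worth tracing. The pattern for a
built-in hook at the same level (my_setup() is a hypothetical name):

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int __init my_setup(void)
	{
		pr_info("my_setup: at initcall level 5 (fs_initcall)\n");
		return 0;	/* non-zero is reported as an initcall error */
	}
	fs_initcall(my_setup);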