ftrace: sched special
[linux-2.6] kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include <linux/stacktrace.h>

#include "trace.h"

unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly	tracing_thresh;

static int tracing_disabled = 1;

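/* Convert nanoseconds to microseconds, rounding to the nearest microsecond */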
static long
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

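/* The timestamp source for trace entries: the per-CPU scheduler clock */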
cycle_t ftrace_now(int cpu)
{
	return cpu_clock(cpu);
}

static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int			tracer_enabled = 1;
static unsigned long		trace_nr_entries = 65536UL;

static struct tracer		*trace_types __read_mostly;
static struct tracer		*current_trace __read_mostly;
static int			max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

void trace_wake_up(void)
{
	/*
	 * The runqueue_is_locked() can fail, but this is the best we
	 * have for now:
	 */
	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
		wake_up(&trace_wait);
}

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

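/* Parse the "trace_entries=" boot option, which overrides trace_nr_entries */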
static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_SPECIAL,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_NEED_RESCHED		= 0x02,
	TRACE_FLAG_HARDIRQ		= 0x04,
	TRACE_FLAG_SOFTIRQ		= 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"sched-tree",
	NULL
};

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}

void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;

	BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
	BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

	list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
		BUG_ON(page->lru.next->prev != &page->lru);
		BUG_ON(page->lru.prev->next != &page->lru);
	}
}

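/*
 * Return the address of the first page of entries in @data's page
 * list, or NULL if the list is empty.
 */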
void *head_page(struct trace_array_cpu *data)
{
	struct page *page;

	check_pages(data);
	if (list_empty(&data->trace_pages))
		return NULL;

	page = list_entry(data->trace_pages.next, struct page, lru);
	BUG_ON(&page->lru == &data->trace_pages);

	return page_address(page);
}

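/*
 * A printf into the bounded one-page trace_seq buffer: output that
 * would not fit completely is dropped rather than truncated.
 */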
static int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}

static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

#define HEX_CHARS 17

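/*
 * Dump @len bytes of @mem as hex; note the low nibble of each byte is
 * emitted first. A trailing space terminates the field.
 */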
static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data;
	unsigned char byte;
	int i, j;

	BUG_ON(len >= HEX_CHARS);

	data = mem;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		byte = data[i];

		hex[j]   = byte & 0x0f;
		if (hex[j] >= 10)
			hex[j] += 'a' - 10;
		else
			hex[j] += '0';
		j++;

		hex[j] = byte >> 4;
		if (hex[j] >= 10)
			hex[j] += 'a' - 10;
		else
			hex[j] += '0';
		j++;
	}
	hex[j] = ' ';
	j++;

	return trace_seq_putmem(s, hex, j);
}

static void
trace_seq_reset(struct trace_seq *s)
{
	s->len = 0;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_reset(s);
}

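/*
 * Exchange the trace pages of two per-CPU buffers (tr1 also inherits
 * tr2's head/tail bookkeeping); used to swap a live trace with the
 * saved maximum-latency trace.
 */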
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
	struct list_head flip_pages;

	INIT_LIST_HEAD(&flip_pages);

	memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
		sizeof(struct trace_array_cpu) -
		offsetof(struct trace_array_cpu, trace_head_idx));

	check_pages(tr1);
	check_pages(tr2);
	list_splice_init(&tr1->trace_pages, &flip_pages);
	list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
	list_splice_init(&flip_pages, &tr2->trace_pages);
	BUG_ON(!list_empty(&flip_pages));
	check_pages(tr1);
	check_pages(tr2);
}

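/**
 * update_max_tr - snapshot all per-CPU buffers into the max trace
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: the cpu of the buffer to record
 *
 * Flips every per-CPU buffer with its max_tr counterpart and saves
 * the details of @tsk that triggered the new maximum.
 */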
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		flip_trace(max_tr.data[i], data);
		tracing_reset(data);
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	flip_trace(max_tr.data[cpu], data);
	tracing_reset(data);

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

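/**
 * register_tracer - add a tracer to the list of available tracers
 * @type: the tracer plugin to register
 *
 * Returns 0 on success, nonzero if the name is missing or already
 * taken, or if the optional startup selftest fails.
 */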
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array_cpu *data;
		struct trace_array *tr = &global_trace;
		int saved_ctrl = tr->ctrl;
		int i;
		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		current_trace = type;
		tr->ctrl = 0;
		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		tr->ctrl = saved_ctrl;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		for_each_possible_cpu(i) {
			data = tr->data[i];
			if (!head_page(data))
				continue;
			tracing_reset(data);
		}
		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}
 out:
	mutex_unlock(&trace_types_lock);
}

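/* Throw away @data's entries and rewind head and tail to the first page */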
void tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_head = data->trace_tail = head_page(data);
	data->trace_head_idx = 0;
	data->trace_tail_idx = 0;
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

void trace_stop_cmdline_recording(void);

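/*
 * Remember the comm of @tsk in a small cache so that output code can
 * later map a pid back to a task name via trace_find_cmdline().
 */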
static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/* evict whichever pid last owned this slot */
		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;
		map_cmdline_to_pid[idx] = tsk->pid;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}

static inline struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
	/*
	 * Round-robin - but skip the head (which is not a real page):
	 */
	next = next->next;
	if (unlikely(next == &data->trace_pages))
		next = next->next;
	BUG_ON(next == &data->trace_pages);

	return next;
}

static inline void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
	struct list_head *next;
	struct page *page;

	page = virt_to_page(addr);

	next = trace_next_list(data, &page->lru);
	page = list_entry(next, struct page, lru);

	return page_address(page);
}

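/*
 * Reserve the next entry slot, advancing the head through the ring of
 * pages; on overrun the tail moves forward too and the oldest entry is
 * overwritten. Callers serialize using data->lock.
 */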
static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;

	data->trace_idx++;
	idx = data->trace_head_idx;
	idx_next = idx + 1;

	BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

	entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		data->trace_head = trace_next_page(data, data->trace_head);
		idx_next = 0;
	}

	if (data->trace_head == data->trace_tail &&
	    idx_next == data->trace_tail_idx) {
		/* overrun */
		data->trace_tail_idx++;
		if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
			data->trace_tail =
				trace_next_page(data, data->trace_tail);
			data->trace_tail_idx = 0;
		}
	}

	data->trace_head_idx = idx_next;

	return entry;
}

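/*
 * Fill in the fields common to all entry types: pid, timestamp and
 * the irq/preempt state captured in entry->flags.
 */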
static inline void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->preempt_count	= pc & 0xff;
	entry->pid		= tsk->pid;
	entry->t		= ftrace_now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_FN;
	entry->fn.ip		= ip;
	entry->fn.parent_ip	= parent_ip;
	spin_unlock_irqrestore(&data->lock, irq_flags);

	trace_wake_up();
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, data, ip, parent_ip, flags);
}

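/*
 * Record a TRACE_SPECIAL entry carrying three caller-supplied values;
 * the text formats print it as "# arg1 arg2 arg3".
 */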
void
__trace_special(void *__tr, void *__data,
		unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array_cpu *data = __data;
	struct trace_array *tr = __tr;
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, 0);
	entry->type		= TRACE_SPECIAL;
	entry->special.arg1	= arg1;
	entry->special.arg2	= arg2;
	entry->special.arg3	= arg3;
	spin_unlock_irqrestore(&data->lock, irq_flags);

	trace_wake_up();
}

void __trace_stack(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long flags,
		   int skip)
{
	struct trace_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_STACK;

	memset(&entry->stack, 0, sizeof(entry->stack));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= skip;
	trace.entries		= entry->stack.caller;

	save_stack_trace(&trace);
}

void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_CTX;
	entry->ctx.prev_pid	= prev->pid;
	entry->ctx.prev_prio	= prev->prio;
	entry->ctx.prev_state	= prev->state;
	entry->ctx.next_pid	= next->pid;
	entry->ctx.next_prio	= next->prio;
	__trace_stack(tr, data, flags, 4);
	spin_unlock_irqrestore(&data->lock, irq_flags);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags)
{
	struct trace_entry *entry;
	unsigned long irq_flags;

	spin_lock_irqsave(&data->lock, irq_flags);
	entry			= tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type		= TRACE_WAKE;
	entry->ctx.prev_pid	= curr->pid;
	entry->ctx.prev_prio	= curr->prio;
	entry->ctx.prev_state	= curr->state;
	entry->ctx.next_pid	= wakee->pid;
	entry->ctx.next_prio	= wakee->prio;
	__trace_stack(tr, data, flags, 5);
	spin_unlock_irqrestore(&data->lock, irq_flags);

	trace_wake_up();
}

#ifdef CONFIG_FTRACE
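/*
 * The callback hooked to the ftrace engine: records a TRACE_FN entry
 * for every traced function, using the per-CPU disabled counter to
 * avoid recursive tracing on the same CPU.
 */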
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
	TRACE_FILE_LAT_FMT	= 1,
};

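/*
 * Peek at the next entry the iterator would read from this CPU's
 * buffer; returns NULL once the CPU has been fully consumed.
 */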
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx ||
	    (data->trace_head == data->trace_tail &&
	     data->trace_head_idx == data->trace_tail_idx))
		return NULL;

	if (!iter->next_page[cpu]) {
		/* Initialize the iterator for this cpu trace buffer */
		WARN_ON(!data->trace_tail);
		page = virt_to_page(data->trace_tail);
		iter->next_page[cpu] = &page->lru;
		iter->next_page_idx[cpu] = data->trace_tail_idx;
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	BUG_ON(&data->trace_pages == &page->lru);

	array = page_address(page);

	WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
	return &array[iter->next_page_idx[cpu]];
}

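/*
 * The merge step of the reader: among all CPUs' next entries, pick the
 * one with the smallest timestamp so output stays time-ordered.
 */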
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ent->t < next->t)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void trace_iterator_increment(struct trace_iterator *iter)
{
	iter->idx++;
	iter->next_idx[iter->cpu]++;
	iter->next_page_idx[iter->cpu]++;

	if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
		struct trace_array_cpu *data = iter->tr->data[iter->cpu];

		iter->next_page_idx[iter->cpu] = 0;
		iter->next_page[iter->cpu] =
			trace_next_list(data, iter->next_page[iter->cpu]);
	}
}

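/* Discard the oldest entry of the CPU buffer the iterator is on */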
static void trace_consume(struct trace_iterator *iter)
{
	struct trace_array_cpu *data = iter->tr->data[iter->cpu];

	data->trace_tail_idx++;
	if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
		data->trace_tail = trace_next_page(data, data->trace_tail);
		data->trace_tail_idx = 0;
	}

	/* if the buffer is now empty, reset the index */
	if (data->trace_head == data->trace_tail &&
	    data->trace_head_idx == data->trace_tail_idx)
		data->trace_idx = 0;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	iter->prev_ent = iter->ent;
	iter->prev_cpu = iter->cpu;

	iter->ent = next;
	iter->cpu = next_cpu;

	if (next)
		trace_iterator_increment(iter);

	return next ? iter : NULL;
}

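/*
 * seq_file callbacks for the trace files: s_start() positions the
 * iterator, s_next() advances it entry by entry, and s_stop() releases
 * what s_start() took.
 */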
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *last_ent = iter->ent;
	int i = (int)*pos;
	void *ent;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;
		iter->prev_ent = NULL;
		iter->prev_cpu = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	return trace_seq_printf(s, fmt, str);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

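/*
 * Print @ip as a symbol name, honouring the sym-offset and sym-addr
 * options; a zero ip is printed as "0".
 */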
static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}

static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total   = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (head_page(tr->data[cpu])) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%d", cpu);
	trace_seq_printf(s, "%c%c",
			(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
			((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		trace_seq_putc(s, 'H');
	else {
		if (hardirq)
			trace_seq_putc(s, 'h');
		else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

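/*
 * Emit one entry in the latency format: comm, pid, CPU, irq/preempt
 * state and timestamps relative to the start of the trace.
 */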
static int
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;
	int i;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, cpu, entry->flags,
				 entry->preempt_count, trace_idx,
				 ns2usecs(entry->t),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		trace_seq_puts(s, " (");
		seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
		trace_seq_puts(s, ")\n");
		break;
	case TRACE_CTX:
	case TRACE_WAKE:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
				 entry->ctx.prev_pid,
				 entry->ctx.prev_prio,
				 S, entry->type == TRACE_CTX ? "==>" : "  +",
				 entry->ctx.next_pid,
				 entry->ctx.next_prio,
				 comm);
		break;
	case TRACE_SPECIAL:
		trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
		break;
	case TRACE_STACK:
		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i)
				trace_seq_puts(s, " <= ");
			seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
		}
		trace_seq_puts(s, "\n");
		break;
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
	return 1;
}

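/* Emit one entry in the default format: TASK-PID [CPU#] TIMESTAMP: event */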
static int print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int ret;
	int S;
	int i;

	entry = iter->ent;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
	if (!ret)
		return 0;
	ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
	if (!ret)
		return 0;

	switch (entry->type) {
	case TRACE_FN:
		ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
		if (!ret)
			return 0;
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
						entry->fn.parent_ip) {
			ret = trace_seq_printf(s, " <-");
			if (!ret)
				return 0;
			ret = seq_print_ip_sym(s, entry->fn.parent_ip,
					       sym_flags);
			if (!ret)
				return 0;
		}
		ret = trace_seq_printf(s, "\n");
		if (!ret)
			return 0;
		break;
	case TRACE_CTX:
	case TRACE_WAKE:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       S,
				       entry->type == TRACE_CTX ? "==>" : "  +",
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);
		if (!ret)
			return 0;
		break;
	case TRACE_SPECIAL:
		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
		if (!ret)
			return 0;
		break;
	case TRACE_STACK:
		for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
			if (i) {
				ret = trace_seq_puts(s, " <= ");
				if (!ret)
					return 0;
			}
			ret = seq_print_ip_sym(s, entry->stack.caller[i],
					       sym_flags);
			if (!ret)
				return 0;
		}
		ret = trace_seq_puts(s, "\n");
		if (!ret)
			return 0;
		break;
	}
	return 1;
}

static int print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	int ret;
	int S;

	entry = iter->ent;

	ret = trace_seq_printf(s, "%d %d %llu ",
		entry->pid, iter->cpu, entry->t);
	if (!ret)
		return 0;

	switch (entry->type) {
	case TRACE_FN:
		ret = trace_seq_printf(s, "%lx %lx\n",
					entry->fn.ip, entry->fn.parent_ip);
		if (!ret)
			return 0;
		break;
	case TRACE_CTX:
	case TRACE_WAKE:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		ret = trace_seq_printf(s, "%d %d %c %d %d\n",
				       entry->ctx.prev_pid,
				       entry->ctx.prev_prio,
				       S,
				       entry->ctx.next_pid,
				       entry->ctx.next_prio);
		if (!ret)
			return 0;
		break;
	case TRACE_SPECIAL:
	case TRACE_STACK:
		ret = trace_seq_printf(s, "# %ld %ld %ld\n",
				 entry->special.arg1,
				 entry->special.arg2,
				 entry->special.arg3);
		if (!ret)
			return 0;
		break;
	}
	return 1;
}

#define SEQ_PUT_FIELD_RET(s, x)				\
do {							\
	if (!trace_seq_putmem(s, &(x), sizeof(x)))	\
		return 0;				\
} while (0)

#define SEQ_PUT_HEX_FIELD_RET(s, x)			\
do {							\
	if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))	\
		return 0;				\
} while (0)

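/* Hex output: each field of the entry dumped with the macros above */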
static int print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	int S;

	entry = iter->ent;

	SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
	SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
	SEQ_PUT_HEX_FIELD_RET(s, entry->t);

	switch (entry->type) {
	case TRACE_FN:
		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
		SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
		break;
	case TRACE_CTX:
	case TRACE_WAKE:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		if (entry->type == TRACE_WAKE)
			S = '+';
		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
		SEQ_PUT_HEX_FIELD_RET(s, S);
		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
		SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
		break;
	case TRACE_SPECIAL:
	case TRACE_STACK:
		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
		SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
		break;
	}
	SEQ_PUT_FIELD_RET(s, newline);

	return 1;
}

static int print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;

	entry = iter->ent;

	SEQ_PUT_FIELD_RET(s, entry->pid);
	SEQ_PUT_FIELD_RET(s, entry->cpu);
	SEQ_PUT_FIELD_RET(s, entry->t);

	switch (entry->type) {
	case TRACE_FN:
		SEQ_PUT_FIELD_RET(s, entry->fn.ip);
		SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
		break;
	case TRACE_CTX:
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
		SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
		SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
		break;
	case TRACE_SPECIAL:
	case TRACE_STACK:
		SEQ_PUT_FIELD_RET(s, entry->special.arg1);
		SEQ_PUT_FIELD_RET(s, entry->special.arg2);
		SEQ_PUT_FIELD_RET(s, entry->special.arg3);
		break;
	}
	return 1;
}

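/* Return 1 once every per-CPU buffer has been fully consumed */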
static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (head_page(data) && data->trace_idx &&
		    (data->trace_tail != data->trace_head ||
		     data->trace_tail_idx != data->trace_head_idx))
			return 0;
	}
	return 1;
}

static int print_trace_line(struct trace_iterator *iter)
{
	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		return print_lat_fmt(iter, iter->idx, iter->cpu);

	return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		print_trace_line(iter);
		trace_print_seq(m, &iter->seq);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

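/*
 * Common open path for the trace files: allocate the iterator, choose
 * the array to read (max_tr when the tracer keeps a max snapshot) and
 * pause tracing while the dump is in progress.
 */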
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	if (tracing_disabled) {
		*ret = -ENODEV;
		return NULL;
	}

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}

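/* seq_file iteration over the list of registered tracers */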
1667 static void *
1668 t_next(struct seq_file *m, void *v, loff_t *pos)
1669 {
1670         struct tracer *t = m->private;
1671
1672         (*pos)++;
1673
1674         if (t)
1675                 t = t->next;
1676
1677         m->private = t;
1678
1679         return t;
1680 }
1681
1682 static void *t_start(struct seq_file *m, loff_t *pos)
1683 {
1684         struct tracer *t = m->private;
1685         loff_t l = 0;
1686
1687         mutex_lock(&trace_types_lock);
1688         for (; t && l < *pos; t = t_next(m, t, &l))
1689                 ;
1690
1691         return t;
1692 }
1693
1694 static void t_stop(struct seq_file *m, void *p)
1695 {
1696         mutex_unlock(&trace_types_lock);
1697 }
1698
1699 static int t_show(struct seq_file *m, void *v)
1700 {
1701         struct tracer *t = v;
1702
1703         if (!t)
1704                 return 0;
1705
1706         seq_printf(m, "%s", t->name);
1707         if (t->next)
1708                 seq_putc(m, ' ');
1709         else
1710                 seq_putc(m, '\n');
1711
1712         return 0;
1713 }
1714
1715 static struct seq_operations show_traces_seq_ops = {
1716         .start          = t_start,
1717         .next           = t_next,
1718         .stop           = t_stop,
1719         .show           = t_show,
1720 };
1721
1722 static int show_traces_open(struct inode *inode, struct file *file)
1723 {
1724         int ret;
1725
1726         if (tracing_disabled)
1727                 return -ENODEV;
1728
1729         ret = seq_open(file, &show_traces_seq_ops);
1730         if (!ret) {
1731                 struct seq_file *m = file->private_data;
1732                 m->private = trace_types;
1733         }
1734
1735         return ret;
1736 }
1737
1738 static struct file_operations tracing_fops = {
1739         .open           = tracing_open,
1740         .read           = seq_read,
1741         .llseek         = seq_lseek,
1742         .release        = tracing_release,
1743 };
1744
1745 static struct file_operations tracing_lt_fops = {
1746         .open           = tracing_lt_open,
1747         .read           = seq_read,
1748         .llseek         = seq_lseek,
1749         .release        = tracing_release,
1750 };
1751
1752 static struct file_operations show_traces_fops = {
1753         .open           = show_traces_open,
1754         .read           = seq_read,
1755         .release        = seq_release,
1756 };
1757
1758 /*
1759  * Only trace on a CPU if the bitmask is set:
1760  */
1761 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
1762
1763 /*
1764  * When tracing/tracing_cpumask is modified, this holds
1765  * the new bitmask we are about to install:
1766  */
1767 static cpumask_t tracing_cpumask_new;
1768
1769 /*
1770  * The tracer itself will not take this lock, but still we want
1771  * to provide a consistent cpumask to user-space:
1772  */
1773 static DEFINE_MUTEX(tracing_cpumask_update_lock);
1774
1775 /*
1776  * Temporary storage for the character representation of the
1777  * CPU bitmask (and one more byte for the newline):
1778  */
1779 static char mask_str[NR_CPUS + 1];
1780
1781 static ssize_t
1782 tracing_cpumask_read(struct file *filp, char __user *ubuf,
1783                      size_t count, loff_t *ppos)
1784 {
1785         int len;
1786
1787         mutex_lock(&tracing_cpumask_update_lock);
1788
1789         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
1790         if (count - len < 2) {
1791                 count = -EINVAL;
1792                 goto out_err;
1793         }
1794         len += sprintf(mask_str + len, "\n");
1795         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
1796
1797 out_err:
1798         mutex_unlock(&tracing_cpumask_update_lock);
1799
1800         return count;
1801 }
1802
1803 static ssize_t
1804 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
1805                       size_t count, loff_t *ppos)
1806 {
1807         int err, cpu;
1808
1809         mutex_lock(&tracing_cpumask_update_lock);
1810         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
1811         if (err)
1812                 goto err_unlock;
1813
1814         spin_lock_irq(&ftrace_max_lock);
1815         for_each_possible_cpu(cpu) {
1816                 /*
1817                  * Increase/decrease the disabled counter if we are
1818                  * about to flip a bit in the cpumask:
1819                  */
1820                 if (cpu_isset(cpu, tracing_cpumask) &&
1821                                 !cpu_isset(cpu, tracing_cpumask_new)) {
1822                         atomic_inc(&global_trace.data[cpu]->disabled);
1823                 }
1824                 if (!cpu_isset(cpu, tracing_cpumask) &&
1825                                 cpu_isset(cpu, tracing_cpumask_new)) {
1826                         atomic_dec(&global_trace.data[cpu]->disabled);
1827                 }
1828         }
1829         spin_unlock_irq(&ftrace_max_lock);
1830
1831         tracing_cpumask = tracing_cpumask_new;
1832
1833         mutex_unlock(&tracing_cpumask_update_lock);
1834
1835         return count;
1836
1837 err_unlock:
1838         mutex_unlock(&tracing_cpumask_update_lock);
1839
1840         return err;
1841 }
1842
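/*
 * Illustration only (not part of the original file): a minimal,
 * hypothetical userspace helper that drives the tracing_cpumask
 * file above. It assumes debugfs is mounted at /debug, as in the
 * mini-HOWTO later in this file; cpumask_parse_user() takes a hex
 * mask, so writing "1" restricts tracing to CPU 0.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int restrict_tracing_to_cpu0(void)
{
        int fd = open("/debug/tracing/tracing_cpumask", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}
#endif
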
1843 static struct file_operations tracing_cpumask_fops = {
1844         .open           = tracing_open_generic,
1845         .read           = tracing_cpumask_read,
1846         .write          = tracing_cpumask_write,
1847 };
1848
1849 static ssize_t
1850 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
1851                        size_t cnt, loff_t *ppos)
1852 {
1853         char *buf;
1854         int r = 0;
1855         int len = 0;
1856         int i;
1857
1858         /* calculate max size */
1859         for (i = 0; trace_options[i]; i++) {
1860                 len += strlen(trace_options[i]);
1861                 len += 3; /* "no" and space */
1862         }
1863
1864         /* +2 for \n and \0 */
1865         buf = kmalloc(len + 2, GFP_KERNEL);
1866         if (!buf)
1867                 return -ENOMEM;
1868
1869         for (i = 0; trace_options[i]; i++) {
1870                 if (trace_flags & (1 << i))
1871                         r += sprintf(buf + r, "%s ", trace_options[i]);
1872                 else
1873                         r += sprintf(buf + r, "no%s ", trace_options[i]);
1874         }
1875
1876         r += sprintf(buf + r, "\n");
1877         WARN_ON(r >= len + 2);
1878
1879         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1880
1881         kfree(buf);
1882
1883         return r;
1884 }
1885
1886 static ssize_t
1887 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
1888                         size_t cnt, loff_t *ppos)
1889 {
1890         char buf[64];
1891         char *cmp = buf;
1892         int neg = 0;
1893         int i;
1894
1895         if (cnt > 63)
1896                 cnt = 63;
1897
1898         if (copy_from_user(&buf, ubuf, cnt))
1899                 return -EFAULT;
1900
1901         buf[cnt] = 0;
1902
1903         if (strncmp(buf, "no", 2) == 0) {
1904                 neg = 1;
1905                 cmp += 2;
1906         }
1907
1908         for (i = 0; trace_options[i]; i++) {
1909                 int len = strlen(trace_options[i]);
1910
1911                 if (strncmp(cmp, trace_options[i], len) == 0) {
1912                         if (neg)
1913                                 trace_flags &= ~(1 << i);
1914                         else
1915                                 trace_flags |= (1 << i);
1916                         break;
1917                 }
1918         }
1919         /*
1920          * If no option could be set, return an error:
1921          */
1922         if (!trace_options[i])
1923                 return -EINVAL;
1924
1925         filp->f_pos += cnt;
1926
1927         return cnt;
1928 }
1929
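/*
 * Illustration only (not part of the original file): a hypothetical
 * helper that toggles an option through the iter_ctrl file above.
 * Writing a bare option name sets its bit in trace_flags; the "no"
 * prefix clears it. Assumes debugfs is mounted at /debug.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_iter_option(const char *opt)
{
        int fd = open("/debug/tracing/iter_ctrl", O_WRONLY);
        ssize_t len = strlen(opt);
        int ret = 0;

        if (fd < 0)
                return -1;
        /* e.g. "print-parent" to set, "noprint-parent" to clear */
        if (write(fd, opt, len) != len)
                ret = -1;
        close(fd);
        return ret;
}
#endif
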
1930 static struct file_operations tracing_iter_fops = {
1931         .open           = tracing_open_generic,
1932         .read           = tracing_iter_ctrl_read,
1933         .write          = tracing_iter_ctrl_write,
1934 };
1935
1936 static const char readme_msg[] =
1937         "tracing mini-HOWTO:\n\n"
1938         "# mkdir /debug\n"
1939         "# mount -t debugfs nodev /debug\n\n"
1940         "# cat /debug/tracing/available_tracers\n"
1941         "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
1942         "# cat /debug/tracing/current_tracer\n"
1943         "none\n"
1944         "# echo sched_switch > /debug/tracing/current_tracer\n"
1945         "# cat /debug/tracing/current_tracer\n"
1946         "sched_switch\n"
1947         "# cat /debug/tracing/iter_ctrl\n"
1948         "noprint-parent nosym-offset nosym-addr noverbose\n"
1949         "# echo print-parent > /debug/tracing/iter_ctrl\n"
1950         "# echo 1 > /debug/tracing/tracing_enabled\n"
1951         "# cat /debug/tracing/trace > /tmp/trace.txt\n"
1952         "# echo 0 > /debug/tracing/tracing_enabled\n"
1953 ;
1954
1955 static ssize_t
1956 tracing_readme_read(struct file *filp, char __user *ubuf,
1957                        size_t cnt, loff_t *ppos)
1958 {
1959         return simple_read_from_buffer(ubuf, cnt, ppos,
1960                                         readme_msg, strlen(readme_msg));
1961 }
1962
1963 static struct file_operations tracing_readme_fops = {
1964         .open           = tracing_open_generic,
1965         .read           = tracing_readme_read,
1966 };
1967
1968 static ssize_t
1969 tracing_ctrl_read(struct file *filp, char __user *ubuf,
1970                   size_t cnt, loff_t *ppos)
1971 {
1972         struct trace_array *tr = filp->private_data;
1973         char buf[64];
1974         int r;
1975
1976         r = sprintf(buf, "%ld\n", tr->ctrl);
1977         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1978 }
1979
1980 static ssize_t
1981 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
1982                    size_t cnt, loff_t *ppos)
1983 {
1984         struct trace_array *tr = filp->private_data;
1985         long val;
1986         char buf[64];
1987
1988         if (cnt > 63)
1989                 cnt = 63;
1990
1991         if (copy_from_user(&buf, ubuf, cnt))
1992                 return -EFAULT;
1993
1994         buf[cnt] = 0;
1995
1996         val = simple_strtoul(buf, NULL, 10);
1997
1998         val = !!val;
1999
2000         mutex_lock(&trace_types_lock);
2001         if (tr->ctrl ^ val) {
2002                 if (val)
2003                         tracer_enabled = 1;
2004                 else
2005                         tracer_enabled = 0;
2006
2007                 tr->ctrl = val;
2008
2009                 if (current_trace && current_trace->ctrl_update)
2010                         current_trace->ctrl_update(tr);
2011         }
2012         mutex_unlock(&trace_types_lock);
2013
2014         filp->f_pos += cnt;
2015
2016         return cnt;
2017 }
2018
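/*
 * Editorial note (not part of the original file): val = !!val above
 * collapses any non-zero input to 1, so the "tr->ctrl ^ val" test
 * fires only when a write actually flips the on/off state; redundant
 * writes skip both the tracer_enabled update and the ctrl_update()
 * callback.
 */
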
2019 static ssize_t
2020 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2021                        size_t cnt, loff_t *ppos)
2022 {
2023         char buf[max_tracer_type_len+2];
2024         int r;
2025
2026         mutex_lock(&trace_types_lock);
2027         if (current_trace)
2028                 r = sprintf(buf, "%s\n", current_trace->name);
2029         else
2030                 r = sprintf(buf, "\n");
2031         mutex_unlock(&trace_types_lock);
2032
2033         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2034 }
2035
2036 static ssize_t
2037 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2038                         size_t cnt, loff_t *ppos)
2039 {
2040         struct trace_array *tr = &global_trace;
2041         struct tracer *t;
2042         char buf[max_tracer_type_len+1];
2043         int i;
2044
2045         if (cnt > max_tracer_type_len)
2046                 cnt = max_tracer_type_len;
2047
2048         if (copy_from_user(&buf, ubuf, cnt))
2049                 return -EFAULT;
2050
2051         buf[cnt] = 0;
2052
2053         /* strip ending whitespace. */
2054         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2055                 buf[i] = 0;
2056
2057         mutex_lock(&trace_types_lock);
2058         for (t = trace_types; t; t = t->next) {
2059                 if (strcmp(t->name, buf) == 0)
2060                         break;
2061         }
2062         if (!t || t == current_trace)
2063                 goto out;
2064
2065         if (current_trace && current_trace->reset)
2066                 current_trace->reset(tr);
2067
2068         current_trace = t;
2069         if (t->init)
2070                 t->init(tr);
2071
2072  out:
2073         mutex_unlock(&trace_types_lock);
2074
2075         filp->f_pos += cnt;
2076
2077         return cnt;
2078 }
2079
2080 static ssize_t
2081 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2082                      size_t cnt, loff_t *ppos)
2083 {
2084         unsigned long *ptr = filp->private_data;
2085         char buf[64];
2086         int r;
2087
2088         r = snprintf(buf, 64, "%ld\n",
2089                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2090         if (r > 64)
2091                 r = 64;
2092         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2093 }
2094
2095 static ssize_t
2096 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2097                       size_t cnt, loff_t *ppos)
2098 {
2099         long *ptr = filp->private_data;
2100         long val;
2101         char buf[64];
2102
2103         if (cnt > 63)
2104                 cnt = 63;
2105
2106         if (copy_from_user(&buf, ubuf, cnt))
2107                 return -EFAULT;
2108
2109         buf[cnt] = 0;
2110
2111         val = simple_strtoul(buf, NULL, 10);
2112
2113         *ptr = val * 1000;
2114
2115         return cnt;
2116 }
2117
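/*
 * Editorial note (not part of the original file): tracing_max_latency
 * and tracing_thresh are read and written in microseconds, while the
 * stored values are in nanoseconds -- hence the val * 1000 above and
 * the nsecs_to_usecs() in the read path. Writing "200" stores 200000.
 */
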
2118 static atomic_t tracing_reader;
2119
2120 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2121 {
2122         struct trace_iterator *iter;
2123
2124         if (tracing_disabled)
2125                 return -ENODEV;
2126
2127         /* We only allow one reader of the pipe */
2128         if (atomic_inc_return(&tracing_reader) != 1) {
2129                 atomic_dec(&tracing_reader);
2130                 return -EBUSY;
2131         }
2132
2133         /* create a buffer to store the information to pass to userspace */
2134         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2135         if (!iter)
2136                 return -ENOMEM;
2137
2138         iter->tr = &global_trace;
2139
2140         filp->private_data = iter;
2141
2142         return 0;
2143 }
2144
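/*
 * Editorial note (not part of the original file): the
 * atomic_inc_return()/atomic_dec() pair above is a lock-free
 * single-opener gate: the first opener sees the counter become 1,
 * while any concurrent opener sees a larger value and backs out
 * with -EBUSY.
 */
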
2145 static int tracing_release_pipe(struct inode *inode, struct file *file)
2146 {
2147         struct trace_iterator *iter = file->private_data;
2148
2149         kfree(iter);
2150         atomic_dec(&tracing_reader);
2151
2152         return 0;
2153 }
2154
2155 static unsigned int
2156 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2157 {
2158         struct trace_iterator *iter = filp->private_data;
2159
2160         if (trace_flags & TRACE_ITER_BLOCK) {
2161                 /*
2162                  * Always select as readable when in blocking mode
2163                  */
2164                 return POLLIN | POLLRDNORM;
2165         }
2166         else {
2167                 if (!trace_empty(iter))
2168                         return POLLIN | POLLRDNORM;
2169                 poll_wait(filp, &trace_wait, poll_table);
2170                 if (!trace_empty(iter))
2171                         return POLLIN | POLLRDNORM;
2172
2173                 return 0;
2174         }
2175 }
2176
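/*
 * Illustration only (not part of the original file): a hypothetical
 * userspace sketch pairing poll(2) with trace_pipe. When
 * TRACE_ITER_BLOCK is unset, the poll() below sleeps in
 * tracing_poll_pipe() above until trace data arrives. Assumes
 * debugfs is mounted at /debug.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void watch_trace_pipe(void)
{
        struct pollfd pfd = {
                .fd     = open("/debug/tracing/trace_pipe", O_RDONLY),
                .events = POLLIN,
        };
        char buf[4096];
        ssize_t n;

        if (pfd.fd < 0)
                return;
        while (poll(&pfd, 1, -1) > 0) {
                n = read(pfd.fd, buf, sizeof(buf));
                if (n <= 0)
                        break;
                fwrite(buf, 1, n, stdout);
        }
        close(pfd.fd);
}
#endif
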
2177 /*
2178  * Consumer reader.
2179  */
2180 static ssize_t
2181 tracing_read_pipe(struct file *filp, char __user *ubuf,
2182                   size_t cnt, loff_t *ppos)
2183 {
2184         struct trace_iterator *iter = filp->private_data;
2185         struct trace_array_cpu *data;
2186         static cpumask_t mask;
2187         static int start;
2188         unsigned long flags;
2189 #ifdef CONFIG_FTRACE
2190         int ftrace_save;
2191 #endif
2192         int read = 0;
2193         int cpu;
2194         int len;
2195         int ret;
2196
2197         /* return any leftover data */
2198         if (iter->seq.len > start) {
2199                 len = iter->seq.len - start;
2200                 if (cnt > len)
2201                         cnt = len;
2202                 ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
2203                 if (ret)
2204                         cnt = -EFAULT;
2205
2206                 start += len;
2207
2208                 return cnt;
2209         }
2210
2211         trace_seq_reset(&iter->seq);
2212         start = 0;
2213
2214         while (trace_empty(iter)) {
2215                 if (!(trace_flags & TRACE_ITER_BLOCK))
2216                         return -EWOULDBLOCK;
2217                 /*
2218                  * This is a makeshift waitqueue. The reason we don't use
2219                  * an actual wait queue is because:
2220                  *  1) we only ever have one waiter
2221                  *  2) the tracer traces all functions, and we don't want
2222                  *     the overhead of calling wake_up and friends
2223                  *     (and of tracing them too)
2224                  * Anyway, this is a very primitive wakeup.
2225                  */
2226                 set_current_state(TASK_INTERRUPTIBLE);
2227                 iter->tr->waiter = current;
2228
2229                 /* sleep for one second, and try again. */
2230                 schedule_timeout(HZ);
2231
2232                 iter->tr->waiter = NULL;
2233
2234                 if (signal_pending(current))
2235                         return -EINTR;
2236
2237                 /*
2238                  * We block until tracing is disabled and we have read
2239                  * something. We still block if tracing is disabled but we
2240                  * have never read anything. This allows a user to cat this
2241                  * file, and then enable tracing. But after we have read
2242                  * something, we give an EOF when tracing is again disabled.
2243                  *
2244                  * iter->pos will be 0 if we haven't read anything.
2245                  */
2246                 if (!tracer_enabled && iter->pos)
2247                         break;
2248
2249                 continue;
2250         }
2251
2252         /* stop when tracing is finished */
2253         if (trace_empty(iter))
2254                 return 0;
2255
2256         if (cnt >= PAGE_SIZE)
2257                 cnt = PAGE_SIZE - 1;
2258
2259         memset(iter, 0, sizeof(*iter));
2260         iter->tr = &global_trace;
2261         iter->pos = -1;
2262
2263         /*
2264          * We need to stop all tracing on all CPUs to read
2265          * the next buffer. This is a bit expensive, but is
2266          * not done often. We fill in all that we can read,
2267          * and then release the locks again.
2268          */
2269
2270         cpus_clear(mask);
2271         local_irq_save(flags);
2272 #ifdef CONFIG_FTRACE
2273         ftrace_save = ftrace_enabled;
2274         ftrace_enabled = 0;
2275 #endif
2276         smp_wmb();
2277         for_each_possible_cpu(cpu) {
2278                 data = iter->tr->data[cpu];
2279
2280                 if (!head_page(data) || !data->trace_idx)
2281                         continue;
2282
2283                 atomic_inc(&data->disabled);
2284                 cpu_set(cpu, mask);
2285         }
2286
2287         for_each_cpu_mask(cpu, mask) {
2288                 data = iter->tr->data[cpu];
2289                 spin_lock(&data->lock);
2290         }
2291
2292         while (find_next_entry_inc(iter) != NULL) {
2293                 int len = iter->seq.len;
2294
2295                 ret = print_trace_line(iter);
2296                 if (!ret) {
2297                         /* don't print partial lines */
2298                         iter->seq.len = len;
2299                         break;
2300                 }
2301
2302                 trace_consume(iter);
2303
2304                 if (iter->seq.len >= cnt)
2305                         break;
2306         }
2307
2308         for_each_cpu_mask(cpu, mask) {
2309                 data = iter->tr->data[cpu];
2310                 spin_unlock(&data->lock);
2311         }
2312
2313         for_each_cpu_mask(cpu, mask) {
2314                 data = iter->tr->data[cpu];
2315                 atomic_dec(&data->disabled);
2316         }
2317 #ifdef CONFIG_FTRACE
2318         ftrace_enabled = ftrace_save;
2319 #endif
2320         local_irq_restore(flags);
2321
2322         /* Now copy what we have to the user */
2323         read = iter->seq.len;
2324         if (read > cnt)
2325                 read = cnt;
2326
2327         ret = copy_to_user(ubuf, iter->seq.buffer, read);
2328
2329         if (read < iter->seq.len)
2330                 start = read;
2331         else
2332                 trace_seq_reset(&iter->seq);
2333
2334         if (ret)
2335                 read = -EFAULT;
2336
2337         return read;
2338 }
2339
2340 static struct file_operations tracing_max_lat_fops = {
2341         .open           = tracing_open_generic,
2342         .read           = tracing_max_lat_read,
2343         .write          = tracing_max_lat_write,
2344 };
2345
2346 static struct file_operations tracing_ctrl_fops = {
2347         .open           = tracing_open_generic,
2348         .read           = tracing_ctrl_read,
2349         .write          = tracing_ctrl_write,
2350 };
2351
2352 static struct file_operations set_tracer_fops = {
2353         .open           = tracing_open_generic,
2354         .read           = tracing_set_trace_read,
2355         .write          = tracing_set_trace_write,
2356 };
2357
2358 static struct file_operations tracing_pipe_fops = {
2359         .open           = tracing_open_pipe,
2360         .poll           = tracing_poll_pipe,
2361         .read           = tracing_read_pipe,
2362         .release        = tracing_release_pipe,
2363 };
2364
2365 #ifdef CONFIG_DYNAMIC_FTRACE
2366
2367 static ssize_t
2368 tracing_read_long(struct file *filp, char __user *ubuf,
2369                   size_t cnt, loff_t *ppos)
2370 {
2371         unsigned long *p = filp->private_data;
2372         char buf[64];
2373         int r;
2374
2375         r = sprintf(buf, "%ld\n", *p);
2376
2377         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2378 }
2379
2380 static struct file_operations tracing_read_long_fops = {
2381         .open           = tracing_open_generic,
2382         .read           = tracing_read_long,
2383 };
2384 #endif
2385
2386 static struct dentry *d_tracer;
2387
2388 struct dentry *tracing_init_dentry(void)
2389 {
2390         static int once;
2391
2392         if (d_tracer)
2393                 return d_tracer;
2394
2395         d_tracer = debugfs_create_dir("tracing", NULL);
2396
2397         if (!d_tracer && !once) {
2398                 once = 1;
2399                 pr_warning("Could not create debugfs directory 'tracing'\n");
2400                 return NULL;
2401         }
2402
2403         return d_tracer;
2404 }
2405
2406 #ifdef CONFIG_FTRACE_SELFTEST
2407 /* Let selftest have access to static functions in this file */
2408 #include "trace_selftest.c"
2409 #endif
2410
2411 static __init void tracer_init_debugfs(void)
2412 {
2413         struct dentry *d_tracer;
2414         struct dentry *entry;
2415
2416         d_tracer = tracing_init_dentry();
2417
2418         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2419                                     &global_trace, &tracing_ctrl_fops);
2420         if (!entry)
2421                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2422
2423         entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2424                                     NULL, &tracing_iter_fops);
2425         if (!entry)
2426                 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2427
2428         entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2429                                     NULL, &tracing_cpumask_fops);
2430         if (!entry)
2431                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2432
2433         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2434                                     &global_trace, &tracing_lt_fops);
2435         if (!entry)
2436                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2437
2438         entry = debugfs_create_file("trace", 0444, d_tracer,
2439                                     &global_trace, &tracing_fops);
2440         if (!entry)
2441                 pr_warning("Could not create debugfs 'trace' entry\n");
2442
2443         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2444                                     &global_trace, &show_traces_fops);
2445         if (!entry)
2446                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2447
2448         entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2449                                     &global_trace, &set_tracer_fops);
2450         if (!entry)
2451                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2452
2453         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2454                                     &tracing_max_latency,
2455                                     &tracing_max_lat_fops);
2456         if (!entry)
2457                 pr_warning("Could not create debugfs "
2458                            "'tracing_max_latency' entry\n");
2459
2460         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2461                                     &tracing_thresh, &tracing_max_lat_fops);
2462         if (!entry)
2463                 pr_warning("Could not create debugfs "
2464                            "'tracing_thresh' entry\n");
2465         entry = debugfs_create_file("README", 0644, d_tracer,
2466                                     NULL, &tracing_readme_fops);
2467         if (!entry)
2468                 pr_warning("Could not create debugfs 'README' entry\n");
2469
2470         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2471                                     NULL, &tracing_pipe_fops);
2472         if (!entry)
2473                 pr_warning("Could not create debugfs "
2474                            "'trace_pipe' entry\n");
2475
2476 #ifdef CONFIG_DYNAMIC_FTRACE
2477         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2478                                     &ftrace_update_tot_cnt,
2479                                     &tracing_read_long_fops);
2480         if (!entry)
2481                 pr_warning("Could not create debugfs "
2482                            "'dyn_ftrace_total_info' entry\n");
2483 #endif
2484 }
2485
2486 /* dummy tracer to disable tracing */
2487 static struct tracer no_tracer __read_mostly =
2488 {
2489         .name           = "none",
2490 };
2491
2492 static int trace_alloc_page(void)
2493 {
2494         struct trace_array_cpu *data;
2495         struct page *page, *tmp;
2496         LIST_HEAD(pages);
2497         void *array;
2498         int i;
2499
2500         /* first allocate a page for each CPU */
2501         for_each_possible_cpu(i) {
2502                 array = (void *)__get_free_page(GFP_KERNEL);
2503                 if (array == NULL) {
2504                         printk(KERN_ERR "tracer: failed to allocate page"
2505                                " for trace buffer!\n");
2506                         goto free_pages;
2507                 }
2508
2509                 page = virt_to_page(array);
2510                 list_add(&page->lru, &pages);
2511
2512 /* Only allocate if we are actually using the max trace */
2513 #ifdef CONFIG_TRACER_MAX_TRACE
2514                 array = (void *)__get_free_page(GFP_KERNEL);
2515                 if (array == NULL) {
2516                         printk(KERN_ERR "tracer: failed to allocate page"
2517                                " for trace buffer!\n");
2518                         goto free_pages;
2519                 }
2520                 page = virt_to_page(array);
2521                 list_add(&page->lru, &pages);
2522 #endif
2523         }
2524
2525         /* Now that we successfully allocated a page per CPU, add them */
2526         for_each_possible_cpu(i) {
2527                 data = global_trace.data[i];
2528                 spin_lock_init(&data->lock);
2529                 lockdep_set_class(&data->lock, &data->lock_key);
2530                 page = list_entry(pages.next, struct page, lru);
2531                 list_del_init(&page->lru);
2532                 list_add_tail(&page->lru, &data->trace_pages);
2533                 ClearPageLRU(page);
2534
2535 #ifdef CONFIG_TRACER_MAX_TRACE
2536                 data = max_tr.data[i];
2537                 spin_lock_init(&data->lock);
2538                 lockdep_set_class(&data->lock, &data->lock_key);
2539                 page = list_entry(pages.next, struct page, lru);
2540                 list_del_init(&page->lru);
2541                 list_add_tail(&page->lru, &data->trace_pages);
2542                 SetPageLRU(page);
2543 #endif
2544         }
2545         global_trace.entries += ENTRIES_PER_PAGE;
2546
2547         return 0;
2548
2549  free_pages:
2550         list_for_each_entry_safe(page, tmp, &pages, lru) {
2551                 list_del_init(&page->lru);
2552                 __free_page(page);
2553         }
2554         return -ENOMEM;
2555 }
2556
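/*
 * Sizing note (not part of the original file): each successful call
 * grows every per-CPU buffer by one page, i.e. by ENTRIES_PER_PAGE ==
 * PAGE_SIZE / sizeof(struct trace_entry) entries. With 4096-byte
 * pages and -- purely for illustration -- a 64-byte trace_entry,
 * that is 64 entries per page, so the default trace_entries=65536
 * needs 1024 pages per CPU, doubled when CONFIG_TRACER_MAX_TRACE
 * also fills max_tr.
 */
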
2557 __init static int tracer_alloc_buffers(void)
2558 {
2559         struct trace_array_cpu *data;
2560         void *array;
2561         struct page *page;
2562         int pages = 0;
2563         int ret = -ENOMEM;
2564         int i;
2565
2566         global_trace.ctrl = tracer_enabled;
2567
2568         /* Allocate the first page for all buffers */
2569         for_each_possible_cpu(i) {
2570                 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
2571                 max_tr.data[i] = &per_cpu(max_data, i);
2572
2573                 array = (void *)__get_free_page(GFP_KERNEL);
2574                 if (array == NULL) {
2575                         printk(KERN_ERR "tracer: failed to allocate page"
2576                                " for trace buffer!\n");
2577                         goto free_buffers;
2578                 }
2579
2580                 /* add the page backing this array to the buffer's page list */
2581                 INIT_LIST_HEAD(&data->trace_pages);
2582                 page = virt_to_page(array);
2583                 list_add(&page->lru, &data->trace_pages);
2584                 /* use the LRU flag to differentiate the two buffers */
2585                 ClearPageLRU(page);
2586
2587 /* Only allocate if we are actually using the max trace */
2588 #ifdef CONFIG_TRACER_MAX_TRACE
2589                 array = (void *)__get_free_page(GFP_KERNEL);
2590                 if (array == NULL) {
2591                         printk(KERN_ERR "tracer: failed to allocate page"
2592                                " for trace buffer!\n");
2593                         goto free_buffers;
2594                 }
2595
2596                 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
2597                 page = virt_to_page(array);
2598                 list_add(&page->lru, &max_tr.data[i]->trace_pages);
2599                 SetPageLRU(page);
2600 #endif
2601         }
2602
2603         /*
2604          * Since we allocate by orders of pages, we may be able to
2605          * round up a bit.
2606          */
2607         global_trace.entries = ENTRIES_PER_PAGE;
2608         pages++;
2609
2610         while (global_trace.entries < trace_nr_entries) {
2611                 if (trace_alloc_page())
2612                         break;
2613                 pages++;
2614         }
2615         max_tr.entries = global_trace.entries;
2616
2617         pr_info("tracer: %d pages allocated for %ld",
2618                 pages, trace_nr_entries);
2619         pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
2620         pr_info("   actual entries %ld\n", global_trace.entries);
2621
2622         tracer_init_debugfs();
2623
2624         trace_init_cmdlines();
2625
2626         register_tracer(&no_tracer);
2627         current_trace = &no_tracer;
2628
2629         /* All seems OK, enable tracing */
2630         tracing_disabled = 0;
2631
2632         return 0;
2633
2634  free_buffers:
2635         for (i-- ; i >= 0; i--) {
2636                 struct page *page, *tmp;
2637                 struct trace_array_cpu *data = global_trace.data[i];
2638
2639                 if (data) {
2640                         list_for_each_entry_safe(page, tmp,
2641                                                  &data->trace_pages, lru) {
2642                                 list_del_init(&page->lru);
2643                                 __free_page(page);
2644                         }
2645                 }
2646
2647 #ifdef CONFIG_TRACER_MAX_TRACE
2648                 data = max_tr.data[i];
2649                 if (data) {
2650                         list_for_each_entry_safe(page, tmp,
2651                                                  &data->trace_pages, lru) {
2652                                 list_del_init(&page->lru);
2653                                 __free_page(page);
2654                         }
2655                 }
2656 #endif
2657         }
2658         return ret;
2659 }
2660 fs_initcall(tracer_alloc_buffers);