ftrace: cleanups
[linux-2.6] kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"

unsigned long __read_mostly     tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly     tracing_thresh;

static int tracing_disabled = 1;

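/*
 * Round to the nearest microsecond before dividing; do_div() is used
 * because a plain 64-bit division is not available on all 32-bit
 * architectures.
 */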
static long notrace
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

static atomic_t                 tracer_counter;
static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int                      tracer_enabled;
static unsigned long            trace_nr_entries = 16384UL;

static struct tracer            *trace_types __read_mostly;
static struct tracer            *current_trace __read_mostly;
static int                      max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

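/*
 * Allow the number of trace entries to be overridden on the kernel
 * command line, e.g. by booting with "trace_entries=65536".
 */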
static int __init set_nr_entries(char *str)
{
        if (!str)
                return 0;
        trace_nr_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,

        __TRACE_LAST_TYPE
};

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_NEED_RESCHED         = 0x02,
        TRACE_FLAG_HARDIRQ              = 0x04,
        TRACE_FLAG_SOFTIRQ              = 0x08,
};

enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT         = 0x01,
        TRACE_ITER_SYM_OFFSET           = 0x02,
        TRACE_ITER_SYM_ADDR             = 0x04,
        TRACE_ITER_VERBOSE              = 0x08,
};

#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static notrace void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}

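/*
 * Sanity-check the page list of a per-cpu trace buffer: every
 * next/prev pair must be consistent. This is a debugging aid; a
 * corrupted list triggers BUG_ON().
 */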
void check_pages(struct trace_array_cpu *data)
{
        struct page *page, *tmp;

        BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
        BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
                BUG_ON(page->lru.next->prev != &page->lru);
                BUG_ON(page->lru.prev->next != &page->lru);
        }
}

void *head_page(struct trace_array_cpu *data)
{
        struct page *page;

        check_pages(data);
        if (list_empty(&data->trace_pages))
                return NULL;

        page = list_entry(data->trace_pages.next, struct page, lru);
        BUG_ON(&page->lru == &data->trace_pages);

        return page_address(page);
}

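/*
 * trace_seq collects formatted output in a page-sized buffer so it can
 * later be handed to the seq_file layer in one piece. Output that does
 * not fit in the remaining space is dropped entirely rather than
 * truncated mid-entry.
 */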
static notrace int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

static notrace int
trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

static notrace int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

static notrace void
trace_seq_reset(struct trace_seq *s)
{
        s->len = 0;
}

static notrace void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_reset(s);
}

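/*
 * Swap the contents of two per-cpu buffers by exchanging their page
 * lists and copying everything from trace_head_idx onwards. This lets
 * the live buffer and the saved maximum-latency buffer trade places
 * without copying the trace data itself.
 */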
notrace static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
        struct list_head flip_pages;

        INIT_LIST_HEAD(&flip_pages);

        memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
                sizeof(struct trace_array_cpu) -
                offsetof(struct trace_array_cpu, trace_head_idx));

        check_pages(tr1);
        check_pages(tr2);
        list_splice_init(&tr1->trace_pages, &flip_pages);
        list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
        list_splice_init(&flip_pages, &tr2->trace_pages);
        BUG_ON(!list_empty(&flip_pages));
        check_pages(tr1);
        check_pages(tr2);
}

notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data;
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
        for_each_possible_cpu(i) {
                data = tr->data[i];
                flip_trace(max_tr.data[i], data);
                tracing_reset(data);
        }

        __update_max_tr(tr, tsk, cpu);
        spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the CPU of the buffer to copy
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        spin_lock(&ftrace_max_lock);
        for_each_possible_cpu(i)
                tracing_reset(max_tr.data[i]);

        flip_trace(max_tr.data[cpu], data);
        tracing_reset(data);

        __update_max_tr(tr, tsk, cpu);
        spin_unlock(&ftrace_max_lock);
}

int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array_cpu *data;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_possible_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                current_trace = type;
                tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_possible_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Tracer %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

notrace void tracing_reset(struct trace_array_cpu *data)
{
        data->trace_idx = 0;
        data->trace_head = data->trace_tail = head_page(data);
        data->trace_head_idx = 0;
        data->trace_tail_idx = 0;
}

#ifdef CONFIG_FTRACE
static notrace void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!tracer_enabled))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};
#endif

notrace void tracing_start_function_trace(void)
{
        register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
        unregister_ftrace_function(&trace_ops);
}

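/*
 * Cache of recently seen task commands: saved_cmdlines holds up to
 * SAVED_CMDLINES comm strings, with map_pid_to_cmdline and
 * map_cmdline_to_pid translating between pids and slots. A value of
 * (unsigned)-1 marks an unused mapping.
 */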
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static notrace void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

static inline notrace struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
        /*
         * Round-robin - but skip the list head (which is not a real page):
         */
        next = next->next;
        if (unlikely(next == &data->trace_pages))
                next = next->next;
        BUG_ON(next == &data->trace_pages);

        return next;
}

static inline notrace void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
        struct list_head *next;
        struct page *page;

        page = virt_to_page(addr);

        next = trace_next_list(data, &page->lru);
        page = list_entry(next, struct page, lru);

        return page_address(page);
}

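/*
 * Reserve the next entry slot in the ring buffer. When the head
 * catches up with the tail the buffer has overrun, and the oldest
 * entry is discarded by advancing the tail.
 */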
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
        unsigned long idx, idx_next;
        struct trace_entry *entry;

        data->trace_idx++;
        idx = data->trace_head_idx;
        idx_next = idx + 1;

        BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

        entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

        if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
                data->trace_head = trace_next_page(data, data->trace_head);
                idx_next = 0;
        }

        if (data->trace_head == data->trace_tail &&
            idx_next == data->trace_tail_idx) {
                /* overrun */
                data->trace_tail_idx++;
                if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                        data->trace_tail =
                                trace_next_page(data, data->trace_tail);
                        data->trace_tail_idx = 0;
                }
        }

        data->trace_head_idx = idx_next;

        return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
        struct task_struct *tsk = current;
        unsigned long pc;

        pc = preempt_count();

        entry->idx              = atomic_inc_return(&tracer_counter);
        entry->preempt_count    = pc & 0xff;
        entry->pid              = tsk->pid;
        entry->t                = now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
        struct trace_entry *entry;

        spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_FN;
        entry->fn.ip            = ip;
        entry->fn.parent_ip     = parent_ip;
        spin_unlock(&data->lock);
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev, struct task_struct *next,
                           unsigned long flags)
{
        struct trace_entry *entry;

        spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_CTX;
        entry->ctx.prev_pid     = prev->pid;
        entry->ctx.prev_prio    = prev->prio;
        entry->ctx.prev_state   = prev->state;
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
        spin_unlock(&data->lock);
}

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
};

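/*
 * Peek at the next entry of one cpu buffer without consuming it.
 * Returns NULL once the iterator has caught up with the writer for
 * that cpu.
 */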
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
                struct trace_iterator *iter, int cpu)
{
        struct page *page;
        struct trace_entry *array;

        if (iter->next_idx[cpu] >= tr->entries ||
            iter->next_idx[cpu] >= data->trace_idx ||
            (data->trace_head == data->trace_tail &&
             data->trace_head_idx == data->trace_tail_idx))
                return NULL;

        if (!iter->next_page[cpu]) {
                /* Initialize the iterator for this cpu trace buffer */
                WARN_ON(!data->trace_tail);
                page = virt_to_page(data->trace_tail);
                iter->next_page[cpu] = &page->lru;
                iter->next_page_idx[cpu] = data->trace_tail_idx;
        }

        page = list_entry(iter->next_page[cpu], struct page, lru);
        BUG_ON(&data->trace_pages == &page->lru);

        array = page_address(page);

        /* Still possible to catch up to the tail */
        if (iter->next_idx[cpu] && array == data->trace_tail &&
            iter->next_page_idx[cpu] == data->trace_tail_idx)
                return NULL;

        WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
        return &array[iter->next_page_idx[cpu]];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
        int next_cpu = -1;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (!head_page(tr->data[cpu]))
                        continue;
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
                if (ent &&
                    (!next || (long)(next->idx - ent->idx) > 0)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        return next;
}

static notrace void
trace_iterator_increment(struct trace_iterator *iter)
{
        iter->idx++;
        iter->next_idx[iter->cpu]++;
        iter->next_page_idx[iter->cpu]++;
        if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
                struct trace_array_cpu *data = iter->tr->data[iter->cpu];

                iter->next_page_idx[iter->cpu] = 0;
                iter->next_page[iter->cpu] =
                        trace_next_list(data, iter->next_page[iter->cpu]);
        }
}

static notrace void
trace_consume(struct trace_iterator *iter)
{
        struct trace_array_cpu *data = iter->tr->data[iter->cpu];

        data->trace_tail_idx++;
        if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                data->trace_tail = trace_next_page(data, data->trace_tail);
                data->trace_tail_idx = 0;
        }

        /* If we emptied the buffer, reset the index */
        if (data->trace_head == data->trace_tail &&
            data->trace_head_idx == data->trace_tail_idx)
                data->trace_idx = 0;

        trace_iterator_increment(iter);
}

static notrace void *
find_next_entry_inc(struct trace_iterator *iter)
{
        struct trace_entry *next;
        int next_cpu = -1;

        next = find_next_entry(iter, &next_cpu);

        iter->prev_ent = iter->ent;
        iter->prev_cpu = iter->cpu;

        iter->ent = next;
        iter->cpu = next_cpu;

        if (next)
                trace_iterator_increment(iter);

        return next ? iter : NULL;
}

static notrace void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *last_ent = iter->ent;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        if (last_ent && !ent)
                seq_puts(m, "\n\nvim:ft=help\n");

        return ent;
}

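/*
 * seq_file iteration: s_start() positions the iterator for a given
 * offset (rewinding and replaying entries when the file is read
 * non-sequentially), s_next() advances it, and s_stop() releases the
 * locks taken in s_start().
 */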
static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int i;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace)
                return NULL;

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;
                iter->prev_ent = NULL;
                iter->prev_cpu = -1;

                for_each_possible_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;
                }

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static notrace int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

static notrace void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                _------=> CPU#            \n");
        seq_puts(m, "#               / _-----=> irqs-off        \n");
        seq_puts(m, "#              | / _----=> need-resched    \n");
        seq_puts(m, "#              || / _---=> hardirq/softirq \n");
        seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#              |||| /                      \n");
        seq_puts(m, "#              |||||     delay             \n");
        seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static notrace void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |      |          |         |\n");
}


static notrace void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total   = 0;
        unsigned long entries = 0;
        int cpu;
        const char *name = "preemption";

        if (type)
                name = type->name;

        for_each_possible_cpu(cpu) {
                if (head_page(tr->data[cpu])) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
                                entries += tr->entries;
                        else
                                entries += tr->data[cpu]->trace_idx;
                }
        }

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

static notrace void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(entry->pid);

        trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
        trace_seq_printf(s, "%d", cpu);
        trace_seq_printf(s, "%c%c",
                        (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                        ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq)
                trace_seq_putc(s, 'H');
        else {
                if (hardirq)
                        trace_seq_putc(s, 'h');
                else {
                        if (softirq)
                                trace_seq_putc(s, 's');
                        else
                                trace_seq_putc(s, '.');
                }
        }

        if (entry->preempt_count)
                trace_seq_printf(s, "%x", entry->preempt_count);
        else
                trace_seq_puts(s, ".");
}

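/*
 * Delays between entries longer than preempt_mark_thresh (in
 * microseconds) are flagged with '!' in the latency output, smaller
 * ones with '+'.
 */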
unsigned long preempt_mark_thresh = 100;

static notrace void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
{
        trace_seq_printf(s, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                trace_seq_puts(s, "!: ");
        else if (rel_usecs > 1)
                trace_seq_puts(s, "+: ");
        else
                trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static notrace void
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
        int S;

        if (!next_entry)
                next_entry = entry;
        rel_usecs = ns2usecs(next_entry->t - entry->t);
        abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(entry->pid);
                trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
                                 " %ld.%03ldms (+%ld.%03ldms): ",
                                 comm,
                                 entry->pid, cpu, entry->flags,
                                 entry->preempt_count, trace_idx,
                                 ns2usecs(entry->t),
                                 abs_usecs/1000,
                                 abs_usecs % 1000, rel_usecs/1000,
                                 rel_usecs % 1000);
        } else {
                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
                seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                comm = trace_find_cmdline(entry->ctx.next_pid);
                trace_seq_printf(s, " %d:%d:%c --> %d:%d %s\n",
                                 entry->ctx.prev_pid,
                                 entry->ctx.prev_prio,
                                 S,
                                 entry->ctx.next_pid,
                                 entry->ctx.next_prio,
                                 comm);
                break;
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
}

static notrace void sync_time_offset(struct trace_iterator *iter)
{
        struct trace_array_cpu *prev_array, *array;
        struct trace_entry *prev_entry, *entry;
        cycle_t prev_t, t;

        entry = iter->ent;
        prev_entry = iter->prev_ent;
        if (!prev_entry)
                return;

        prev_array = iter->tr->data[iter->prev_cpu];
        array = iter->tr->data[iter->cpu];

        prev_t = prev_entry->t + prev_array->time_offset;
        t = entry->t + array->time_offset;

        /*
         * If time goes backwards we increase the offset of
         * the current array, to not have observable time warps.
         * This will quickly synchronize the time offsets of
         * multiple CPUs:
         */
        if (t < prev_t)
                array->time_offset += prev_t - t;
}

static notrace int
print_trace_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int S;
        int ret;

        sync_time_offset(iter);
        entry = iter->ent;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(entry->t + iter->tr->data[iter->cpu]->time_offset);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
        if (!ret)
                return 0;
        ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
        if (!ret)
                return 0;
        ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
        if (!ret)
                return 0;

        switch (entry->type) {
        case TRACE_FN:
                ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                if (!ret)
                        return 0;
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                                                entry->fn.parent_ip) {
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return 0;
                        ret = seq_print_ip_sym(s, entry->fn.parent_ip,
                                               sym_flags);
                        if (!ret)
                                return 0;
                }
                ret = trace_seq_printf(s, "\n");
                if (!ret)
                        return 0;
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                ret = trace_seq_printf(s, " %d:%d:%c ==> %d:%d\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       S,
                                       entry->ctx.next_pid,
                                       entry->ctx.next_prio);
                if (!ret)
                        return 0;
                break;
        }
        return 1;
}

static int trace_empty(struct trace_iterator *iter)
{
        struct trace_array_cpu *data;
        int cpu;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (head_page(data) && data->trace_idx &&
                    (data->trace_tail != data->trace_head ||
                     data->trace_tail_idx != data->trace_head_idx))
                        return 0;
        }
        return 1;
}

static int s_show(struct seq_file *m, void *v)
{
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
                if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                } else {
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                }
        } else {
                if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                        print_lat_fmt(iter, iter->idx, iter->cpu);
                else
                        print_trace_fmt(iter);
                trace_print_seq(m, &iter->seq);
        }

        return 0;
}

static struct seq_operations tracer_seq_ops = {
        .start          = s_start,
        .next           = s_next,
        .stop           = s_stop,
        .show           = s_show,
};

static notrace struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
        struct trace_iterator *iter;

        if (tracing_disabled) {
                *ret = -ENODEV;
                return NULL;
        }

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                *ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&trace_types_lock);
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
                iter->tr = inode->i_private;
        iter->trace = current_trace;
        iter->pos = -1;

        /* TODO stop tracer */
        *ret = seq_open(file, &tracer_seq_ops);
        if (!*ret) {
                struct seq_file *m = file->private_data;
                m->private = iter;

                /* stop the trace while dumping */
                if (iter->tr->ctrl)
                        tracer_enabled = 0;

                if (iter->trace && iter->trace->open)
                        iter->trace->open(iter);
        } else {
                kfree(iter);
                iter = NULL;
        }
        mutex_unlock(&trace_types_lock);

 out:
        return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
        if (tracing_disabled)
                return -ENODEV;

        filp->private_data = inode->i_private;
        return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct trace_iterator *iter = m->private;

        mutex_lock(&trace_types_lock);
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);

        /* reenable tracing if it was previously enabled */
        if (iter->tr->ctrl)
                tracer_enabled = 1;
        mutex_unlock(&trace_types_lock);

        seq_release(inode, file);
        kfree(iter);
        return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
        int ret;

        __tracing_open(inode, file, &ret);

        return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
        struct trace_iterator *iter;
        int ret;

        iter = __tracing_open(inode, file, &ret);

        if (!ret)
                iter->iter_flags |= TRACE_FILE_LAT_FMT;

        return ret;
}


static notrace void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct tracer *t = m->private;

        (*pos)++;

        if (t)
                t = t->next;

        m->private = t;

        return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct tracer *t = m->private;
        loff_t l = 0;

        mutex_lock(&trace_types_lock);
        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
        struct tracer *t = v;

        if (!t)
                return 0;

        seq_printf(m, "%s", t->name);
        if (t->next)
                seq_putc(m, ' ');
        else
                seq_putc(m, '\n');

        return 0;
}

static struct seq_operations show_traces_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
        int ret;

        if (tracing_disabled)
                return -ENODEV;

        ret = seq_open(file, &show_traces_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = trace_types;
        }

        return ret;
}

static struct file_operations tracing_fops = {
        .open           = tracing_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = tracing_release,
};

static struct file_operations tracing_lt_fops = {
        .open           = tracing_lt_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = tracing_release,
};

static struct file_operations show_traces_fops = {
        .open = show_traces_open,
        .read = seq_read,
        .release = seq_release,
};

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char *buf;
        int r = 0;
        int len = 0;
        int i;

        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += strlen(trace_options[i]);
                len += 3; /* "no" and space */
        }

        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
                        r += sprintf(buf + r, "%s ", trace_options[i]);
                else
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }

        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    buf, r);

        kfree(buf);

        return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp = buf;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        if (strncmp(buf, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }

        for (i = 0; trace_options[i]; i++) {
                int len = strlen(trace_options[i]);

                if (strncmp(cmp, trace_options[i], len) == 0) {
                        if (neg)
                                trace_flags &= ~(1 << i);
                        else
                                trace_flags |= (1 << i);
                        break;
                }
        }

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations tracing_iter_fops = {
        .open = tracing_open_generic,
        .read = tracing_iter_ctrl_read,
        .write = tracing_iter_ctrl_write,
};

static const char readme_msg[] =
        "tracing mini-HOWTO:\n\n"
        "# mkdir /debug\n"
        "# mount -t debugfs nodev /debug\n\n"
        "# cat /debug/tracing/available_tracers\n"
        "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
        "# cat /debug/tracing/current_tracer\n"
        "none\n"
        "# echo sched_switch > /debug/tracing/current_tracer\n"
        "# cat /debug/tracing/current_tracer\n"
        "sched_switch\n"
        "# cat /debug/tracing/iter_ctrl\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
        "# echo print-parent > /debug/tracing/iter_ctrl\n"
        "# echo 1 > /debug/tracing/tracing_enabled\n"
        "# cat /debug/tracing/trace > /tmp/trace.txt\n"
        "# echo 0 > /debug/tracing/tracing_enabled\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                        readme_msg, strlen(readme_msg));
}

static struct file_operations tracing_readme_fops = {
        .open = tracing_open_generic,
        .read = tracing_readme_read,
};


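/*
 * The tracing_enabled control file: reading returns the current ctrl
 * value, writing any non-zero value enables tracing, e.g.:
 *
 *	# echo 1 > /debug/tracing/tracing_enabled
 */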
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;

        r = sprintf(buf, "%ld\n", tr->ctrl);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        long val;
        char buf[64];

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);

        val = !!val;

        mutex_lock(&trace_types_lock);
        if (tr->ctrl ^ val) {
                if (val)
                        tracer_enabled = 1;
                else
                        tracer_enabled = 0;

                tr->ctrl = val;

                if (current_trace && current_trace->ctrl_update)
                        current_trace->ctrl_update(tr);
        }
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char buf[max_tracer_type_len+2];
        int r;

        mutex_lock(&trace_types_lock);
        if (current_trace)
                r = sprintf(buf, "%s\n", current_trace->name);
        else
                r = sprintf(buf, "\n");
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = &global_trace;
        struct tracer *t;
        char buf[max_tracer_type_len+1];
        int i;

        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        /* strip ending whitespace. */
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
                        break;
        }
        if (!t || t == current_trace)
                goto out;

        if (current_trace && current_trace->reset)
                current_trace->reset(tr);

        current_trace = t;
        if (t->init)
                t->init(tr);

 out:
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, 64, "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
        if (r > 64)
                r = 64;
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

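/*
 * Writes are interpreted as microseconds and stored internally in
 * nanoseconds, matching the conversion done on the read side.
 */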
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        long *ptr = filp->private_data;
        long val;
        char buf[64];

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);

        *ptr = val * 1000;

        return cnt;
}

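/*
 * tracing_reader enforces a single consumer on the trace pipe;
 * additional opens fail with -EBUSY.
 */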
static atomic_t tracing_reader;

static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
        struct trace_iterator *iter;

        if (tracing_disabled)
                return -ENODEV;

        /* We only allow one reader of the pipe */
1718         if (atomic_inc_return(&tracing_reader) != 1) {
1719                 atomic_dec(&tracing_reader);
1720                 return -EBUSY;
1721         }
1722
1723         /* create a buffer to store the information to pass to userspace */
1724         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1725         if (!iter)
1726                 return -ENOMEM;
1727
1728         iter->tr = &global_trace;
1729
1730         filp->private_data = iter;
1731
1732         return 0;
1733 }
1734
1735 static int tracing_release_pipe(struct inode *inode, struct file *file)
1736 {
1737         struct trace_iterator *iter = file->private_data;
1738
1739         kfree(iter);
1740         atomic_dec(&tracing_reader);
1741
1742         return 0;
1743 }
1744
1745 /*
1746  * Consumer reader: entries are consumed from the buffer as they are read.
1747  */
1748 static ssize_t
1749 tracing_read_pipe(struct file *filp, char __user *ubuf,
1750                   size_t cnt, loff_t *ppos)
1751 {
1752         struct trace_iterator *iter = filp->private_data;
1753         struct trace_array_cpu *data;
1754         static cpumask_t mask;
1755         struct trace_entry *entry;
1756         static int start;
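             /*
              * mask and start can safely be static: tracing_open_pipe()
              * enforces a single reader, and a static cpumask_t also
              * keeps a potentially large mask off the kernel stack.
              */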
1757         unsigned long flags;
1758         int read = 0;
1759         int cpu;
1760         int len;
1761         int ret;
1762
1763         /* return any leftover data */
1764         if (iter->seq.len > start) {
1765                 len = iter->seq.len - start;
1766                 if (cnt > len)
1767                         cnt = len;
1768                 ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
1769                 if (ret)
1770                         return -EFAULT;
1771
1772                 /* only advance past what we actually copied */
1773                 start += cnt;
1774                 return cnt;
1775         }
1776
1777         trace_seq_reset(&iter->seq);
1778         start = 0;
1779
1780         while (trace_empty(iter)) {
1781                 /*
1782                  * This is a makeshift waitqueue. The reasons we don't use
1783                  * a real wait queue are:
1784                  *  1) we only ever have one waiter
1785                  *  2) the tracer traces all functions, and we don't want
1786                  *     the overhead of calling wake_up and friends
1787                  *     (and of tracing them too).
1788                  * In short, this is a very primitive wakeup.
1789                  */
1790                 set_current_state(TASK_INTERRUPTIBLE);
1791                 iter->tr->waiter = current;
1792
1793                 /* sleep for one second, and try again. */
1794                 schedule_timeout(HZ);
1795
1796                 iter->tr->waiter = NULL;
1797
1798                 if (signal_pending(current))
1799                         return -EINTR;
1800
1801                 /*
1802                  * We block until we have read something and tracing is
1803                  * disabled. We still block if tracing is disabled but
1804                  * nothing has been read yet: this lets a user cat the
1805                  * file first and enable tracing afterwards. Once
1806                  * something has been read, we return EOF when tracing
1807                  * is disabled again.
1808                  *
1809                  * iter->pos will be 0 if we haven't read anything.
1810                  */
1811                 if (!tracer_enabled && iter->pos)
1812                         break;
1813         }
1815
1816         /* stop when tracing is finished */
1817         if (trace_empty(iter))
1818                 return 0;
1819
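             /* the trace_seq buffer is one page; cap the read accordingly */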
1820         if (cnt >= PAGE_SIZE)
1821                 cnt = PAGE_SIZE - 1;
1822
1823         memset(iter, 0, sizeof(*iter));
1824         iter->tr = &global_trace;
1825         iter->pos = -1;
1826
1827         /*
1828          * We need to stop all tracing on all CPUs to read
1829          * the next buffer. This is a bit expensive, but is
1830          * not done often. We fill in all that we can read,
1831          * and then release the locks again.
1832          */
1833
1834         cpus_clear(mask);
1835         local_irq_save(flags);
1836         for_each_possible_cpu(cpu) {
1837                 data = iter->tr->data[cpu];
1838
1839                 if (!head_page(data) || !data->trace_idx)
1840                         continue;
1841
1842                 atomic_inc(&data->disabled);
1843                 spin_lock(&data->lock);
1844                 cpu_set(cpu, mask);
1845         }
1846
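             /* fill the seq buffer, consuming each entry as it is printed */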
1847         while ((entry = find_next_entry(iter, &cpu))) {
1852                 iter->ent = entry;
1853                 iter->cpu = cpu;
1854
1855                 ret = print_trace_fmt(iter);
1856                 if (!ret)
1857                         break;
1858
1859                 trace_consume(iter);
1860
1861                 if (iter->seq.len >= cnt)
1862                         break;
1863
1864         }
1865
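             /* release the per-cpu locks and re-enable tracing on those CPUs */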
1866         for_each_cpu_mask(cpu, mask) {
1867                 data = iter->tr->data[cpu];
1868                 spin_unlock(&data->lock);
1869                 atomic_dec(&data->disabled);
1870         }
1871         local_irq_restore(flags);
1872
1873         /* Now copy what we have to the user */
1874         read = iter->seq.len;
1875         if (read > cnt)
1876                 read = cnt;
1877
1878         ret = copy_to_user(ubuf, iter->seq.buffer, read);
1879
1880         if (read < iter->seq.len)
1881                 start = read;
1882         else
1883                 trace_seq_reset(&iter->seq);
1884
1885         if (ret)
1886                 read = -EFAULT;
1887
1888         return read;
1889 }
1890
1891 static struct file_operations tracing_max_lat_fops = {
1892         .open           = tracing_open_generic,
1893         .read           = tracing_max_lat_read,
1894         .write          = tracing_max_lat_write,
1895 };
1896
1897 static struct file_operations tracing_ctrl_fops = {
1898         .open           = tracing_open_generic,
1899         .read           = tracing_ctrl_read,
1900         .write          = tracing_ctrl_write,
1901 };
1902
1903 static struct file_operations set_tracer_fops = {
1904         .open           = tracing_open_generic,
1905         .read           = tracing_set_trace_read,
1906         .write          = tracing_set_trace_write,
1907 };
1908
1909 static struct file_operations tracing_pipe_fops = {
1910         .open           = tracing_open_pipe,
1911         .read           = tracing_read_pipe,
1912         .release        = tracing_release_pipe,
1913 };
1914
1915 #ifdef CONFIG_DYNAMIC_FTRACE
1916
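     /*
      * Format a bare unsigned long for userspace; used below for the
      * dyn_ftrace_total_info count (ftrace_update_tot_cnt).
      */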
1917 static ssize_t
1918 tracing_read_long(struct file *filp, char __user *ubuf,
1919                   size_t cnt, loff_t *ppos)
1920 {
1921         unsigned long *p = filp->private_data;
1922         char buf[64];
1923         int r;
1924
1925         r = sprintf(buf, "%ld\n", *p);
1926
1927         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1928 }
1929
1930 static struct file_operations tracing_read_long_fops = {
1931         .open           = tracing_open_generic,
1932         .read           = tracing_read_long,
1933 };
1934 #endif
1935
1936 static struct dentry *d_tracer;
1937
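     /*
      * Return the debugfs "tracing" directory, creating it on first
      * use; warn only once if the creation fails.
      */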
1938 struct dentry *tracing_init_dentry(void)
1939 {
1940         static int once;
1941
1942         if (d_tracer)
1943                 return d_tracer;
1944
1945         d_tracer = debugfs_create_dir("tracing", NULL);
1946
1947         if (!d_tracer && !once) {
1948                 once = 1;
1949                 pr_warning("Could not create debugfs directory 'tracing'\n");
1950                 return NULL;
1951         }
1952
1953         return d_tracer;
1954 }
1955
1956 #ifdef CONFIG_FTRACE_SELFTEST
1957 /* Let selftest have access to static functions in this file */
1958 #include "trace_selftest.c"
1959 #endif
1960
1961 static void __init tracer_init_debugfs(void)
1962 {
1963         struct dentry *d_tracer;
1964         struct dentry *entry;
1965
1966         d_tracer = tracing_init_dentry();
1967
1968         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
1969                                     &global_trace, &tracing_ctrl_fops);
1970         if (!entry)
1971                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
1972
1973         entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
1974                                     NULL, &tracing_iter_fops);
1975         if (!entry)
1976                 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
1977
1978         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
1979                                     &global_trace, &tracing_lt_fops);
1980         if (!entry)
1981                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
1982
1983         entry = debugfs_create_file("trace", 0444, d_tracer,
1984                                     &global_trace, &tracing_fops);
1985         if (!entry)
1986                 pr_warning("Could not create debugfs 'trace' entry\n");
1987
1988         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
1989                                     &global_trace, &show_traces_fops);
1990         if (!entry)
1991                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
1992
1993         entry = debugfs_create_file("current_tracer", 0644, d_tracer,
1994                                     &global_trace, &set_tracer_fops);
1995         if (!entry)
1996                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
1997
1998         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
1999                                     &tracing_max_latency,
2000                                     &tracing_max_lat_fops);
2001         if (!entry)
2002                 pr_warning("Could not create debugfs "
2003                            "'tracing_max_latency' entry\n");
2004
2005         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2006                                     &tracing_thresh, &tracing_max_lat_fops);
2007         if (!entry)
2008                 pr_warning("Could not create debugfs "
2009                            "'tracing_thresh' entry\n");

2010         entry = debugfs_create_file("README", 0644, d_tracer,
2011                                     NULL, &tracing_readme_fops);
2012         if (!entry)
2013                 pr_warning("Could not create debugfs 'README' entry\n");
2014
2015         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2016                                     NULL, &tracing_pipe_fops);
2017         if (!entry)
2018                 pr_warning("Could not create debugfs "
2019                            "'trace_pipe' entry\n");
2020
2021 #ifdef CONFIG_DYNAMIC_FTRACE
2022         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2023                                     &ftrace_update_tot_cnt,
2024                                     &tracing_read_long_fops);
2025         if (!entry)
2026                 pr_warning("Could not create debugfs "
2027                            "'dyn_ftrace_total_info' entry\n");
2028 #endif
2029 }
2030
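     /*
      * Typical use of the files created above (paths assume debugfs
      * is mounted on /debugfs):
      *
      *   cat /debugfs/tracing/available_tracers     # list tracers
      *   echo 1 > /debugfs/tracing/tracing_enabled  # turn tracing on
      *   cat /debugfs/tracing/trace_pipe            # consume the output
      */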
2031 /* dummy trace to disable tracing */
2032 static struct tracer no_tracer __read_mostly =
2033 {
2034         .name           = "none",
2035 };
2036
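     /*
      * Grow each per-cpu trace buffer (and, when configured, the max
      * buffer) by one page; all-or-nothing on allocation failure.
      */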
2037 static int trace_alloc_page(void)
2038 {
2039         struct trace_array_cpu *data;
2040         struct page *page, *tmp;
2041         LIST_HEAD(pages);
2042         void *array;
2043         int i;
2044
2045         /* first allocate a page for each CPU */
2046         for_each_possible_cpu(i) {
2047                 array = (void *)__get_free_page(GFP_KERNEL);
2048                 if (array == NULL) {
2049                         printk(KERN_ERR "tracer: failed to allocate page "
2050                                "for trace buffer!\n");
2051                         goto free_pages;
2052                 }
2053
2054                 page = virt_to_page(array);
2055                 list_add(&page->lru, &pages);
2056
2057 /* Only allocate if we are actually using the max trace */
2058 #ifdef CONFIG_TRACER_MAX_TRACE
2059                 array = (void *)__get_free_page(GFP_KERNEL);
2060                 if (array == NULL) {
2061                         printk(KERN_ERR "tracer: failed to allocate page "
2062                                "for trace buffer!\n");
2063                         goto free_pages;
2064                 }
2065                 page = virt_to_page(array);
2066                 list_add(&page->lru, &pages);
2067 #endif
2068         }
2069
2070         /* Now that we have successfully allocated a page per CPU, add them */
2071         for_each_possible_cpu(i) {
2072                 data = global_trace.data[i];
2073                 spin_lock_init(&data->lock);
2074                 lockdep_set_class(&data->lock, &data->lock_key);
2075                 page = list_entry(pages.next, struct page, lru);
2076                 list_del_init(&page->lru);
2077                 list_add_tail(&page->lru, &data->trace_pages);
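                     /*
                      * The page LRU flag distinguishes the two buffers:
                      * clear = global trace, set = max trace.
                      */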
2078                 ClearPageLRU(page);
2079
2080 #ifdef CONFIG_TRACER_MAX_TRACE
2081                 data = max_tr.data[i];
2082                 spin_lock_init(&data->lock);
2083                 lockdep_set_class(&data->lock, &data->lock_key);
2084                 page = list_entry(pages.next, struct page, lru);
2085                 list_del_init(&page->lru);
2086                 list_add_tail(&page->lru, &data->trace_pages);
2087                 SetPageLRU(page);
2088 #endif
2089         }
2090         global_trace.entries += ENTRIES_PER_PAGE;
2091
2092         return 0;
2093
2094  free_pages:
2095         list_for_each_entry_safe(page, tmp, &pages, lru) {
2096                 list_del_init(&page->lru);
2097                 __free_page(page);
2098         }
2099         return -ENOMEM;
2100 }
2101
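     /*
      * Boot-time buffer allocation, run as an fs_initcall: sized from
      * the "trace_entries=" boot parameter (default 16384 entries),
      * rounded up to whole pages.
      */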
2102 static int __init tracer_alloc_buffers(void)
2103 {
2104         struct trace_array_cpu *data;
2105         void *array;
2106         struct page *page;
2107         int pages = 0;
2108         int ret = -ENOMEM;
2109         int i;
2110
2111         /* Allocate the first page for all buffers */
2112         for_each_possible_cpu(i) {
2113                 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
2114                 max_tr.data[i] = &per_cpu(max_data, i);
2115
2116                 array = (void *)__get_free_page(GFP_KERNEL);
2117                 if (array == NULL) {
2118                         printk(KERN_ERR "tracer: failed to allocate page "
2119                                "for trace buffer!\n");
2120                         goto free_buffers;
2121                 }
2122
2123                 /* set the array to the list */
2124                 INIT_LIST_HEAD(&data->trace_pages);
2125                 page = virt_to_page(array);
2126                 list_add(&page->lru, &data->trace_pages);
2127                 /* use the LRU flag to differentiate the two buffers */
2128                 ClearPageLRU(page);
2129
2130 /* Only allocate if we are actually using the max trace */
2131 #ifdef CONFIG_TRACER_MAX_TRACE
2132                 array = (void *)__get_free_page(GFP_KERNEL);
2133                 if (array == NULL) {
2134                         printk(KERN_ERR "tracer: failed to allocate page "
2135                                "for trace buffer!\n");
2136                         goto free_buffers;
2137                 }
2138
2139                 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
2140                 page = virt_to_page(array);
2141                 list_add(&page->lru, &max_tr.data[i]->trace_pages);
2142                 SetPageLRU(page);
2143 #endif
2144         }
2145
2146         /*
2147          * Since we allocate by orders of pages, we may be able to
2148          * round up a bit.
2149          */
2150         global_trace.entries = ENTRIES_PER_PAGE;
2151         pages++;
2152
2153         while (global_trace.entries < trace_nr_entries) {
2154                 if (trace_alloc_page())
2155                         break;
2156                 pages++;
2157         }
2158         max_tr.entries = global_trace.entries;
2159
2160         pr_info("tracer: %d pages allocated for %ld",
2161                 pages, trace_nr_entries);
2162         pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
2163         pr_info("   actual entries %ld\n", global_trace.entries);
2164
2165         tracer_init_debugfs();
2166
2167         trace_init_cmdlines();
2168
2169         register_tracer(&no_tracer);
2170         current_trace = &no_tracer;
2171
2172         /* All seems OK, enable tracing */
2173         tracing_disabled = 0;
2174
2175         return 0;
2176
2177  free_buffers:
2178         for (i--; i >= 0; i--) {
2179                 struct page *page, *tmp;
2180                 struct trace_array_cpu *data = global_trace.data[i];
2181
2182                 if (data) {
2183                         list_for_each_entry_safe(page, tmp,
2184                                                  &data->trace_pages, lru) {
2185                                 list_del_init(&page->lru);
2186                                 __free_page(page);
2187                         }
2188                 }
2189
2190 #ifdef CONFIG_TRACER_MAX_TRACE
2191                 data = max_tr.data[i];
2192                 if (data) {
2193                         list_for_each_entry_safe(page, tmp,
2194                                                  &data->trace_pages, lru) {
2195                                 list_del_init(&page->lru);
2196                                 __free_page(page);
2197                         }
2198                 }
2199 #endif
2200         }
2201         return ret;
2202 }
2203 fs_initcall(tracer_alloc_buffers);