ftrace: return EOF in trace_pipe on change of tracer
[linux-2.6] / kernel / trace / trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include <linux/stacktrace.h>

#include "trace.h"

unsigned long __read_mostly     tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly     tracing_thresh;

static int tracing_disabled = 1;

long
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}

cycle_t ftrace_now(int cpu)
{
        return cpu_clock(cpu);
}

static struct trace_array       global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array       max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int                      tracer_enabled = 1;
static unsigned long            trace_nr_entries = 65536UL;

static struct tracer            *trace_types __read_mostly;
static struct tracer            *current_trace __read_mostly;
static int                      max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;

void trace_wake_up(void)
{
        /*
         * The runqueue_is_locked() can fail, but this is the best we
         * have for now:
         */
        if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
                wake_up(&trace_wait);
}

#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))

static int __init set_nr_entries(char *str)
{
        if (!str)
                return 0;
        trace_nr_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("trace_entries=", set_nr_entries);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_NEED_RESCHED         = 0x02,
        TRACE_FLAG_HARDIRQ              = 0x04,
        TRACE_FLAG_SOFTIRQ              = 0x08,
};

#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "sched-tree",
        NULL
};

static raw_spinlock_t ftrace_max_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;
        /* record this task's comm */
        tracing_record_cmdline(current);
}

void check_pages(struct trace_array_cpu *data)
{
        struct page *page, *tmp;

        BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
        BUG_ON(data->trace_pages.prev->next != &data->trace_pages);

        list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
                BUG_ON(page->lru.next->prev != &page->lru);
                BUG_ON(page->lru.prev->next != &page->lru);
        }
}

void *head_page(struct trace_array_cpu *data)
{
        struct page *page;

        check_pages(data);
        if (list_empty(&data->trace_pages))
                return NULL;

        page = list_entry(data->trace_pages.next, struct page, lru);
        BUG_ON(&page->lru == &data->trace_pages);

        return page_address(page);
}

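/*
 * trace_seq_*() helpers stage formatted output in a page-sized
 * buffer before it is handed to a seq_file or copied to user space.
 * Each helper returns 0 when the output would not fit, so callers
 * can detect a full buffer and stop emitting mid-record.
 */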
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

static int
trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

static int
trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

#define HEX_CHARS 17

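/*
 * Emit 'len' bytes of 'mem' as hex digits followed by a space.
 * Bytes are walked from the most significant end on either
 * endianness, but note that within each byte the low nibble is
 * written before the high one, so the two digits of each byte
 * appear swapped relative to plain %02x output.
 */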
static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        unsigned char *data;
        unsigned char byte;
        int i, j;

        BUG_ON(len >= HEX_CHARS);

        data = mem;

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                byte = data[i];

                hex[j]   = byte & 0x0f;
                if (hex[j] >= 10)
                        hex[j] += 'a' - 10;
                else
                        hex[j] += '0';
                j++;

                hex[j] = byte >> 4;
                if (hex[j] >= 10)
                        hex[j] += 'a' - 10;
                else
                        hex[j] += '0';
                j++;
        }
        hex[j] = ' ';
        j++;

        return trace_seq_putmem(s, hex, j);
}

static void
trace_seq_reset(struct trace_seq *s)
{
        s->len = 0;
}

static void
trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

        s->buffer[len] = 0;
        seq_puts(m, s->buffer);

        trace_seq_reset(s);
}

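/*
 * Swap the contents of two per-cpu buffers: tr1 takes over tr2's
 * ring-buffer bookkeeping (everything from trace_head_idx onward)
 * and the page lists of the two buffers are exchanged through a
 * temporary list head.  Callers reset whichever side becomes the
 * live buffer afterwards.
 */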
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
        struct list_head flip_pages;

        INIT_LIST_HEAD(&flip_pages);

        memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
                sizeof(struct trace_array_cpu) -
                offsetof(struct trace_array_cpu, trace_head_idx));

        check_pages(tr1);
        check_pages(tr2);
        list_splice_init(&tr1->trace_pages, &flip_pages);
        list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
        list_splice_init(&flip_pages, &tr2->trace_pages);
        BUG_ON(!list_empty(&flip_pages));
        check_pages(tr1);
        check_pages(tr2);
}

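/**
 * update_max_tr - snapshot all CPU buffers into the max trace
 * @tr - tracer
 * @tsk - the task that triggered the new max latency
 * @cpu - the cpu of the buffer that hit the maximum
 *
 * Flip every CPU's buffer into max_tr so that the trace leading up
 * to the new maximum latency is preserved, then reset the live
 * buffers and record the task's details.
 */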
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data;
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
        for_each_possible_cpu(i) {
                data = tr->data[i];
                flip_trace(max_tr.data[i], data);
                tracing_reset(data);
        }

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        int i;

        WARN_ON_ONCE(!irqs_disabled());
        __raw_spin_lock(&ftrace_max_lock);
        for_each_possible_cpu(i)
                tracing_reset(max_tr.data[i]);

        flip_trace(max_tr.data[cpu], data);
        tracing_reset(data);

        __update_max_tr(tr, tsk, cpu);
        __raw_spin_unlock(&ftrace_max_lock);
}

int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

#ifdef CONFIG_FTRACE_STARTUP_TEST
        if (type->selftest) {
                struct tracer *saved_tracer = current_trace;
                struct trace_array_cpu *data;
                struct trace_array *tr = &global_trace;
                int saved_ctrl = tr->ctrl;
                int i;
                /*
                 * Run a selftest on this tracer.
                 * Here we reset the trace buffer, and set the current
                 * tracer to be this tracer. The tracer can then run some
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
                for_each_possible_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                current_trace = type;
                tr->ctrl = 0;
                /* the test is responsible for initializing and enabling */
                pr_info("Testing tracer %s: ", type->name);
                ret = type->selftest(type, tr);
                /* the test is responsible for resetting too */
                current_trace = saved_tracer;
                tr->ctrl = saved_ctrl;
                if (ret) {
                        printk(KERN_CONT "FAILED!\n");
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
                for_each_possible_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
                        tracing_reset(data);
                }
                printk(KERN_CONT "PASSED\n");
        }
#endif

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;

 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}

void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

void tracing_reset(struct trace_array_cpu *data)
{
        data->trace_idx = 0;
        data->trace_head = data->trace_tail = head_page(data);
        data->trace_head_idx = 0;
        data->trace_tail_idx = 0;
}

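/*
 * Cache of the comms of recently seen tasks: map_pid_to_cmdline
 * maps a pid to a slot in saved_cmdlines, and map_cmdline_to_pid
 * records which pid currently owns each slot so stale mappings can
 * be evicted when the SAVED_CMDLINES-entry table wraps around.
 */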
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

void trace_stop_cmdline_recording(void);

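/*
 * Record tsk->comm in the cmdline cache.  If this pid has no slot
 * yet, take the next one round-robin and evict the previous owner's
 * mapping first.
 */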
static void trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

static char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

static inline struct list_head *
trace_next_list(struct trace_array_cpu *data, struct list_head *next)
{
        /*
         * Round-robin - but skip the head (which is not a real page):
         */
        next = next->next;
        if (unlikely(next == &data->trace_pages))
                next = next->next;
        BUG_ON(next == &data->trace_pages);

        return next;
}

static inline void *
trace_next_page(struct trace_array_cpu *data, void *addr)
{
        struct list_head *next;
        struct page *page;

        page = virt_to_page(addr);

        next = trace_next_list(data, &page->lru);
        page = list_entry(next, struct page, lru);

        return page_address(page);
}

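/*
 * Reserve the next entry slot in the per-cpu ring buffer, advancing
 * trace_head to the next page when the current one fills up.  If the
 * head catches up with the tail, the oldest entry is dropped (an
 * overrun) by pushing the tail forward as well.
 */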
static inline struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
{
        unsigned long idx, idx_next;
        struct trace_entry *entry;

        data->trace_idx++;
        idx = data->trace_head_idx;
        idx_next = idx + 1;

        BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);

        entry = data->trace_head + idx * TRACE_ENTRY_SIZE;

        if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
                data->trace_head = trace_next_page(data, data->trace_head);
                idx_next = 0;
        }

        if (data->trace_head == data->trace_tail &&
            idx_next == data->trace_tail_idx) {
                /* overrun */
                data->trace_tail_idx++;
                if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                        data->trace_tail =
                                trace_next_page(data, data->trace_tail);
                        data->trace_tail_idx = 0;
                }
        }

        data->trace_head_idx = idx_next;

        return entry;
}

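/*
 * Fill in the fields common to all entry types: pid, timestamp,
 * preempt count and the irq-off/hardirq/softirq/need-resched flags.
 */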
static inline void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
{
        struct task_struct *tsk = current;
        unsigned long pc;

        pc = preempt_count();

        entry->preempt_count    = pc & 0xff;
        entry->pid              = (tsk) ? tsk->pid : 0;
        entry->t                = ftrace_now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}

void
trace_function(struct trace_array *tr, struct trace_array_cpu *data,
               unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_FN;
        entry->fn.ip            = ip;
        entry->fn.parent_ip     = parent_ip;
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
        if (likely(!atomic_read(&data->disabled)))
                trace_function(tr, data, ip, parent_ip, flags);
}

void
__trace_special(void *__tr, void *__data,
                unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array_cpu *data = __data;
        struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type             = TRACE_SPECIAL;
        entry->special.arg1     = arg1;
        entry->special.arg2     = arg2;
        entry->special.arg3     = arg3;
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}

void __trace_stack(struct trace_array *tr,
                   struct trace_array_cpu *data,
                   unsigned long flags,
                   int skip)
{
        struct trace_entry *entry;
        struct stack_trace trace;

        if (!(trace_flags & TRACE_ITER_STACKTRACE))
                return;

        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_STACK;

        memset(&entry->stack, 0, sizeof(entry->stack));

        trace.nr_entries        = 0;
        trace.max_entries       = FTRACE_STACK_ENTRIES;
        trace.skip              = skip;
        trace.entries           = entry->stack.caller;

        save_stack_trace(&trace);
}

void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
                           unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_CTX;
        entry->ctx.prev_pid     = prev->pid;
        entry->ctx.prev_prio    = prev->prio;
        entry->ctx.prev_state   = prev->state;
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
        entry->ctx.next_state   = next->state;
        __trace_stack(tr, data, flags, 4);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);
}

void
tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
                           unsigned long flags)
{
        struct trace_entry *entry;
        unsigned long irq_flags;

        raw_local_irq_save(irq_flags);
        __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_WAKE;
        entry->ctx.prev_pid     = curr->pid;
        entry->ctx.prev_prio    = curr->prio;
        entry->ctx.prev_state   = curr->state;
        entry->ctx.next_pid     = wakee->pid;
        entry->ctx.next_prio    = wakee->prio;
        entry->ctx.next_state   = wakee->state;
        __trace_stack(tr, data, flags, 5);
        __raw_spin_unlock(&data->lock);
        raw_local_irq_restore(irq_flags);

        trace_wake_up();
}

#ifdef CONFIG_FTRACE
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!tracer_enabled))
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

void tracing_start_function_trace(void)
{
        register_ftrace_function(&trace_ops);
}

void tracing_stop_function_trace(void)
{
        unregister_ftrace_function(&trace_ops);
}
#endif

enum trace_file_type {
        TRACE_FILE_LAT_FMT      = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
                struct trace_iterator *iter, int cpu)
{
        struct page *page;
        struct trace_entry *array;

        if (iter->next_idx[cpu] >= tr->entries ||
            iter->next_idx[cpu] >= data->trace_idx ||
            (data->trace_head == data->trace_tail &&
             data->trace_head_idx == data->trace_tail_idx))
                return NULL;

        if (!iter->next_page[cpu]) {
                /* Initialize the iterator for this cpu trace buffer */
                WARN_ON(!data->trace_tail);
                page = virt_to_page(data->trace_tail);
                iter->next_page[cpu] = &page->lru;
                iter->next_page_idx[cpu] = data->trace_tail_idx;
        }

        page = list_entry(iter->next_page[cpu], struct page, lru);
        BUG_ON(&data->trace_pages == &page->lru);

        array = page_address(page);

        WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
        return &array[iter->next_page_idx[cpu]];
}

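/*
 * The per-cpu buffers are merged at read time: peek at the next
 * entry of every CPU and return the one with the smallest
 * timestamp, recording which CPU it came from in *ent_cpu.
 */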
static struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
        int next_cpu = -1;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (!head_page(tr->data[cpu]))
                        continue;
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
                /*
                 * Pick the entry with the smallest timestamp:
                 */
                if (ent && (!next || ent->t < next->t)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        return next;
}

static void trace_iterator_increment(struct trace_iterator *iter)
{
        iter->idx++;
        iter->next_idx[iter->cpu]++;
        iter->next_page_idx[iter->cpu]++;

        if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
                struct trace_array_cpu *data = iter->tr->data[iter->cpu];

                iter->next_page_idx[iter->cpu] = 0;
                iter->next_page[iter->cpu] =
                        trace_next_list(data, iter->next_page[iter->cpu]);
        }
}

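/*
 * Consume (discard) the entry at the tail of the current CPU's
 * buffer, moving the tail to the next page when necessary.  This is
 * the destructive counterpart of the iterator walk, used by readers
 * such as trace_pipe that drain the buffer.
 */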
static void trace_consume(struct trace_iterator *iter)
{
        struct trace_array_cpu *data = iter->tr->data[iter->cpu];

        data->trace_tail_idx++;
        if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
                data->trace_tail = trace_next_page(data, data->trace_tail);
                data->trace_tail_idx = 0;
        }

        /* If we emptied the buffer, reset the index */
        if (data->trace_head == data->trace_tail &&
            data->trace_head_idx == data->trace_tail_idx)
                data->trace_idx = 0;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
        struct trace_entry *next;
        int next_cpu = -1;

        next = find_next_entry(iter, &next_cpu);

        iter->prev_ent = iter->ent;
        iter->prev_cpu = iter->cpu;

        iter->ent = next;
        iter->cpu = next_cpu;

        if (next)
                trace_iterator_increment(iter);

        return next ? iter : NULL;
}

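/*
 * seq_file iteration over the merged trace.  The walk is forward
 * only: s_start() restarts from the beginning and re-walks to *pos
 * whenever the requested position does not continue from the last
 * one handed out.
 */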
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *last_ent = iter->ent;
        int i = (int)*pos;
        void *ent;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        if (last_ent && !ent)
                seq_puts(m, "\n\nvim:ft=help\n");

        return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int i;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace) {
                mutex_unlock(&trace_types_lock);
                return NULL;
        }

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;
                iter->prev_ent = NULL;
                iter->prev_cpu = -1;

                for_each_possible_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;
                }

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        return trace_seq_printf(s, fmt, str);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

static void print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                _------=> CPU#            \n");
        seq_puts(m, "#               / _-----=> irqs-off        \n");
        seq_puts(m, "#              | / _----=> need-resched    \n");
        seq_puts(m, "#              || / _---=> hardirq/softirq \n");
        seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#              |||| /                      \n");
        seq_puts(m, "#              |||||     delay             \n");
        seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |      |          |         |\n");
}


static void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long total   = 0;
        unsigned long entries = 0;
        int cpu;
        const char *name = "preemption";

        if (type)
                name = type->name;

        for_each_possible_cpu(cpu) {
                if (head_page(tr->data[cpu])) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
                                entries += tr->entries;
                        else
                                entries += tr->data[cpu]->trace_idx;
                }
        }

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   total,
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(entry->pid);

        trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
        trace_seq_printf(s, "%d", cpu);
        trace_seq_printf(s, "%c%c",
                        (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                        ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq)
                trace_seq_putc(s, 'H');
        else {
                if (hardirq)
                        trace_seq_putc(s, 'h');
                else {
                        if (softirq)
                                trace_seq_putc(s, 's');
                        else
                                trace_seq_putc(s, '.');
                }
        }

        if (entry->preempt_count)
                trace_seq_printf(s, "%x", entry->preempt_count);
        else
                trace_seq_puts(s, ".");
}

unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
{
        trace_seq_printf(s, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                trace_seq_puts(s, "!: ");
        else if (rel_usecs > 1)
                trace_seq_puts(s, "+: ");
        else
                trace_seq_puts(s, " : ");
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

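/*
 * Output one entry in the latency-trace format: either the compact
 * header (comm, pid, cpu, irq/preempt flags) or, with the "verbose"
 * option, full absolute and relative timestamps in milliseconds.
 */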
static int
print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
        int S, T;
        int i;
        unsigned state;

        if (!next_entry)
                next_entry = entry;
        rel_usecs = ns2usecs(next_entry->t - entry->t);
        abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(entry->pid);
                trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
                                 " %ld.%03ldms (+%ld.%03ldms): ",
                                 comm,
                                 entry->pid, cpu, entry->flags,
                                 entry->preempt_count, trace_idx,
                                 ns2usecs(entry->t),
                                 abs_usecs/1000,
                                 abs_usecs % 1000, rel_usecs/1000,
                                 rel_usecs % 1000);
        } else {
                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                trace_seq_puts(s, " (");
                seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';

                state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
                S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
                comm = trace_find_cmdline(entry->ctx.next_pid);
                trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
                                 entry->ctx.prev_pid,
                                 entry->ctx.prev_prio,
                                 S, entry->type == TRACE_CTX ? "==>" : "  +",
                                 entry->ctx.next_pid,
                                 entry->ctx.next_prio,
                                 T, comm);
                break;
        case TRACE_SPECIAL:
                trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
                break;
        case TRACE_STACK:
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i)
                                trace_seq_puts(s, " <= ");
                        seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
                }
                trace_seq_puts(s, "\n");
                break;
        default:
                trace_seq_printf(s, "Unknown type %d\n", entry->type);
        }
        return 1;
}

static int print_trace_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int ret;
        int S, T;
        int i;

        entry = iter->ent;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(entry->t);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
        if (!ret)
                return 0;
        ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
        if (!ret)
                return 0;
        ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
        if (!ret)
                return 0;

        switch (entry->type) {
        case TRACE_FN:
                ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
                if (!ret)
                        return 0;
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                                                entry->fn.parent_ip) {
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return 0;
                        ret = seq_print_ip_sym(s, entry->fn.parent_ip,
                                               sym_flags);
                        if (!ret)
                                return 0;
                }
                ret = trace_seq_printf(s, "\n");
                if (!ret)
                        return 0;
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       S,
                                       entry->type == TRACE_CTX ? "==>" : "  +",
                                       entry->ctx.next_pid,
                                       entry->ctx.next_prio,
                                       T);
                if (!ret)
                        return 0;
                break;
        case TRACE_SPECIAL:
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
                if (!ret)
                        return 0;
                break;
        case TRACE_STACK:
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i) {
                                ret = trace_seq_puts(s, " <= ");
                                if (!ret)
                                        return 0;
                        }
                        ret = seq_print_ip_sym(s, entry->stack.caller[i],
                                               sym_flags);
                        if (!ret)
                                return 0;
                }
                ret = trace_seq_puts(s, "\n");
                if (!ret)
                        return 0;
                break;
        }
        return 1;
}

static int print_raw_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
        int ret;
        int S, T;

        entry = iter->ent;

        ret = trace_seq_printf(s, "%d %d %llu ",
                entry->pid, iter->cpu, entry->t);
        if (!ret)
                return 0;

        switch (entry->type) {
        case TRACE_FN:
                ret = trace_seq_printf(s, "%x %x\n",
                                        entry->fn.ip, entry->fn.parent_ip);
                if (!ret)
                        return 0;
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                        S = '+';
                ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       S,
                                       entry->ctx.next_pid,
                                       entry->ctx.next_prio,
                                       T);
                if (!ret)
                        return 0;
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
                if (!ret)
                        return 0;
                break;
        }
        return 1;
}

#define SEQ_PUT_FIELD_RET(s, x)                         \
do {                                                    \
        if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
                return 0;                               \
} while (0)

#define SEQ_PUT_HEX_FIELD_RET(s, x)                     \
do {                                                    \
        if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
                return 0;                               \
} while (0)

static int print_hex_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;
        int S, T;

        entry = iter->ent;

        SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
        SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
        SEQ_PUT_HEX_FIELD_RET(s, entry->t);

        switch (entry->type) {
        case TRACE_FN:
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                T = entry->ctx.next_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                        S = '+';
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
                SEQ_PUT_HEX_FIELD_RET(s, S);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
                SEQ_PUT_HEX_FIELD_RET(s, T);
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
                SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
                break;
        }
        SEQ_PUT_FIELD_RET(s, newline);

        return 1;
}

static int print_bin_fmt(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;

        entry = iter->ent;

        SEQ_PUT_FIELD_RET(s, entry->pid);
        SEQ_PUT_FIELD_RET(s, entry->cpu);
        SEQ_PUT_FIELD_RET(s, entry->t);

        switch (entry->type) {
        case TRACE_FN:
                SEQ_PUT_FIELD_RET(s, entry->fn.ip);
                SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
                break;
        case TRACE_CTX:
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
                SEQ_PUT_FIELD_RET(s, entry->special.arg1);
                SEQ_PUT_FIELD_RET(s, entry->special.arg2);
                SEQ_PUT_FIELD_RET(s, entry->special.arg3);
                break;
        }
        return 1;
}

static int trace_empty(struct trace_iterator *iter)
{
        struct trace_array_cpu *data;
        int cpu;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (head_page(data) && data->trace_idx &&
                    (data->trace_tail != data->trace_head ||
                     data->trace_tail_idx != data->trace_head_idx))
                        return 0;
        }
        return 1;
}

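/*
 * Pick the output format for one entry: a tracer-supplied
 * print_line() hook wins, then the bin/hex/raw option flags, then
 * the latency format for latency_trace files, and finally the
 * default human-readable format.
 */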
static int print_trace_line(struct trace_iterator *iter)
{
        if (iter->trace && iter->trace->print_line)
                return iter->trace->print_line(iter);

        if (trace_flags & TRACE_ITER_BIN)
                return print_bin_fmt(iter);

        if (trace_flags & TRACE_ITER_HEX)
                return print_hex_fmt(iter);

        if (trace_flags & TRACE_ITER_RAW)
                return print_raw_fmt(iter);

        if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                return print_lat_fmt(iter, iter->idx, iter->cpu);

        return print_trace_fmt(iter);
}

static int s_show(struct seq_file *m, void *v)
{
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                if (iter->tr) {
                        seq_printf(m, "# tracer: %s\n", iter->trace->name);
                        seq_puts(m, "#\n");
                }
                if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                } else {
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                }
        } else {
                print_trace_line(iter);
                trace_print_seq(m, &iter->seq);
        }

        return 0;
}

static struct seq_operations tracer_seq_ops = {
        .start          = s_start,
        .next           = s_next,
        .stop           = s_stop,
        .show           = s_show,
};

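/*
 * Common open path for the trace and latency_trace files.  While
 * the seq_file is open, tracing is disabled (if the buffer was
 * active) so the dump sees a stable buffer; tracing_release()
 * re-enables it.
 */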
1586 static struct trace_iterator *
1587 __tracing_open(struct inode *inode, struct file *file, int *ret)
1588 {
1589         struct trace_iterator *iter;
1590
1591         if (tracing_disabled) {
1592                 *ret = -ENODEV;
1593                 return NULL;
1594         }
1595
1596         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1597         if (!iter) {
1598                 *ret = -ENOMEM;
1599                 goto out;
1600         }
1601
1602         mutex_lock(&trace_types_lock);
1603         if (current_trace && current_trace->print_max)
1604                 iter->tr = &max_tr;
1605         else
1606                 iter->tr = inode->i_private;
1607         iter->trace = current_trace;
1608         iter->pos = -1;
1609
1610         /* TODO stop tracer */
1611         *ret = seq_open(file, &tracer_seq_ops);
1612         if (!*ret) {
1613                 struct seq_file *m = file->private_data;
1614                 m->private = iter;
1615
1616                 /* stop the trace while dumping */
1617                 if (iter->tr->ctrl)
1618                         tracer_enabled = 0;
1619
1620                 if (iter->trace && iter->trace->open)
1621                         iter->trace->open(iter);
1622         } else {
1623                 kfree(iter);
1624                 iter = NULL;
1625         }
1626         mutex_unlock(&trace_types_lock);
1627
1628  out:
1629         return iter;
1630 }
1631
1632 int tracing_open_generic(struct inode *inode, struct file *filp)
1633 {
1634         if (tracing_disabled)
1635                 return -ENODEV;
1636
1637         filp->private_data = inode->i_private;
1638         return 0;
1639 }
1640
1641 int tracing_release(struct inode *inode, struct file *file)
1642 {
1643         struct seq_file *m = (struct seq_file *)file->private_data;
1644         struct trace_iterator *iter = m->private;
1645
1646         mutex_lock(&trace_types_lock);
1647         if (iter->trace && iter->trace->close)
1648                 iter->trace->close(iter);
1649
1650         /* reenable tracing if it was previously enabled */
1651         if (iter->tr->ctrl)
1652                 tracer_enabled = 1;
1653         mutex_unlock(&trace_types_lock);
1654
1655         seq_release(inode, file);
1656         kfree(iter);
1657         return 0;
1658 }
1659
1660 static int tracing_open(struct inode *inode, struct file *file)
1661 {
1662         int ret;
1663
1664         __tracing_open(inode, file, &ret);
1665
1666         return ret;
1667 }
1668
1669 static int tracing_lt_open(struct inode *inode, struct file *file)
1670 {
1671         struct trace_iterator *iter;
1672         int ret;
1673
1674         iter = __tracing_open(inode, file, &ret);
1675
1676         if (!ret)
1677                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
1678
1679         return ret;
1680 }
1681
1683 static void *
1684 t_next(struct seq_file *m, void *v, loff_t *pos)
1685 {
1686         struct tracer *t = m->private;
1687
1688         (*pos)++;
1689
1690         if (t)
1691                 t = t->next;
1692
1693         m->private = t;
1694
1695         return t;
1696 }
1697
1698 static void *t_start(struct seq_file *m, loff_t *pos)
1699 {
1700         struct tracer *t = m->private;
1701         loff_t l = 0;
1702
1703         mutex_lock(&trace_types_lock);
1704         for (; t && l < *pos; t = t_next(m, t, &l))
1705                 ;
1706
1707         return t;
1708 }
1709
1710 static void t_stop(struct seq_file *m, void *p)
1711 {
1712         mutex_unlock(&trace_types_lock);
1713 }
1714
1715 static int t_show(struct seq_file *m, void *v)
1716 {
1717         struct tracer *t = v;
1718
1719         if (!t)
1720                 return 0;
1721
1722         seq_printf(m, "%s", t->name);
1723         if (t->next)
1724                 seq_putc(m, ' ');
1725         else
1726                 seq_putc(m, '\n');
1727
1728         return 0;
1729 }
1730
1731 static struct seq_operations show_traces_seq_ops = {
1732         .start          = t_start,
1733         .next           = t_next,
1734         .stop           = t_stop,
1735         .show           = t_show,
1736 };
1737
1738 static int show_traces_open(struct inode *inode, struct file *file)
1739 {
1740         int ret;
1741
1742         if (tracing_disabled)
1743                 return -ENODEV;
1744
1745         ret = seq_open(file, &show_traces_seq_ops);
1746         if (!ret) {
1747                 struct seq_file *m = file->private_data;
1748                 m->private = trace_types;
1749         }
1750
1751         return ret;
1752 }
1753
1754 static struct file_operations tracing_fops = {
1755         .open           = tracing_open,
1756         .read           = seq_read,
1757         .llseek         = seq_lseek,
1758         .release        = tracing_release,
1759 };
1760
1761 static struct file_operations tracing_lt_fops = {
1762         .open           = tracing_lt_open,
1763         .read           = seq_read,
1764         .llseek         = seq_lseek,
1765         .release        = tracing_release,
1766 };
1767
1768 static struct file_operations show_traces_fops = {
1769         .open           = show_traces_open,
1770         .read           = seq_read,
1771         .release        = seq_release,
1772 };
1773
1774 /*
1775  * Only trace on a CPU if the bitmask is set:
1776  */
1777 static cpumask_t tracing_cpumask = CPU_MASK_ALL;
1778
1779 /*
1780  * When tracing/tracing_cpu_mask is modified then this holds
1781  * the new bitmask we are about to install:
1782  */
1783 static cpumask_t tracing_cpumask_new;
1784
1785 /*
1786  * The tracer itself will not take this lock, but still we want
1787  * to provide a consistent cpumask to user-space:
1788  */
1789 static DEFINE_MUTEX(tracing_cpumask_update_lock);
1790
1791 /*
1792  * Temporary storage for the character representation of the
1793  * CPU bitmask (and one more byte for the newline):
1794  */
1795 static char mask_str[NR_CPUS + 1];
1796
1797 static ssize_t
1798 tracing_cpumask_read(struct file *filp, char __user *ubuf,
1799                      size_t count, loff_t *ppos)
1800 {
1801         int len;
1802
1803         mutex_lock(&tracing_cpumask_update_lock);
1804
1805         len = cpumask_scnprintf(mask_str, NR_CPUS, tracing_cpumask);
1806         if (count - len < 2) {
1807                 count = -EINVAL;
1808                 goto out_err;
1809         }
1810         len += sprintf(mask_str + len, "\n");
1811         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
1812
1813 out_err:
1814         mutex_unlock(&tracing_cpumask_update_lock);
1815
1816         return count;
1817 }
1818
1819 static ssize_t
1820 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
1821                       size_t count, loff_t *ppos)
1822 {
1823         int err, cpu;
1824
1825         mutex_lock(&tracing_cpumask_update_lock);
1826         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
1827         if (err)
1828                 goto err_unlock;
1829
1830         raw_local_irq_disable();
1831         __raw_spin_lock(&ftrace_max_lock);
1832         for_each_possible_cpu(cpu) {
1833                 /*
1834                  * Increase/decrease the disabled counter if we are
1835                  * about to flip a bit in the cpumask:
1836                  */
1837                 if (cpu_isset(cpu, tracing_cpumask) &&
1838                                 !cpu_isset(cpu, tracing_cpumask_new)) {
1839                         atomic_inc(&global_trace.data[cpu]->disabled);
1840                 }
1841                 if (!cpu_isset(cpu, tracing_cpumask) &&
1842                                 cpu_isset(cpu, tracing_cpumask_new)) {
1843                         atomic_dec(&global_trace.data[cpu]->disabled);
1844                 }
1845         }
1846         __raw_spin_unlock(&ftrace_max_lock);
1847         raw_local_irq_enable();
1848
1849         tracing_cpumask = tracing_cpumask_new;
1850
1851         mutex_unlock(&tracing_cpumask_update_lock);
1852
1853         return count;
1854
1855 err_unlock:
1856         mutex_unlock(&tracing_cpumask_update_lock);
1857
1858         return err;
1859 }
1860
1861 static struct file_operations tracing_cpumask_fops = {
1862         .open           = tracing_open_generic,
1863         .read           = tracing_cpumask_read,
1864         .write          = tracing_cpumask_write,
1865 };
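/*
 * Hypothetical usage sketch: restrict tracing to CPUs 0 and 1 by
 * writing a hex mask; cpumask_parse_user() accepts the same format
 * that cpumask_scnprintf() emits:
 *
 *        # echo 3 > /debug/tracing/tracing_cpumask
 *        # cat /debug/tracing/tracing_cpumask
 */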
1866
1867 static ssize_t
1868 tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
1869                        size_t cnt, loff_t *ppos)
1870 {
1871         char *buf;
1872         int r = 0;
1873         int len = 0;
1874         int i;
1875
1876         /* calculate max size */
1877         for (i = 0; trace_options[i]; i++) {
1878                 len += strlen(trace_options[i]);
1879                 len += 3; /* "no" and space */
1880         }
1881
1882         /* +2 for \n and \0 */
1883         buf = kmalloc(len + 2, GFP_KERNEL);
1884         if (!buf)
1885                 return -ENOMEM;
1886
1887         for (i = 0; trace_options[i]; i++) {
1888                 if (trace_flags & (1 << i))
1889                         r += sprintf(buf + r, "%s ", trace_options[i]);
1890                 else
1891                         r += sprintf(buf + r, "no%s ", trace_options[i]);
1892         }
1893
1894         r += sprintf(buf + r, "\n");
1895         WARN_ON(r >= len + 2);
1896
1897         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1898
1899         kfree(buf);
1900
1901         return r;
1902 }
1903
1904 static ssize_t
1905 tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
1906                         size_t cnt, loff_t *ppos)
1907 {
1908         char buf[64];
1909         char *cmp = buf;
1910         int neg = 0;
1911         int i;
1912
1913         if (cnt > 63)
1914                 cnt = 63;
1915
1916         if (copy_from_user(&buf, ubuf, cnt))
1917                 return -EFAULT;
1918
1919         buf[cnt] = 0;
1920
1921         if (strncmp(buf, "no", 2) == 0) {
1922                 neg = 1;
1923                 cmp += 2;
1924         }
1925
1926         for (i = 0; trace_options[i]; i++) {
1927                 int len = strlen(trace_options[i]);
1928
1929                 if (strncmp(cmp, trace_options[i], len) == 0) {
1930                         if (neg)
1931                                 trace_flags &= ~(1 << i);
1932                         else
1933                                 trace_flags |= (1 << i);
1934                         break;
1935                 }
1936         }
1937         /*
1938          * If no option could be set, return an error:
1939          */
1940         if (!trace_options[i])
1941                 return -EINVAL;
1942
1943         filp->f_pos += cnt;
1944
1945         return cnt;
1946 }
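/*
 * Usage sketch: each write sets one option from trace_options[], or
 * clears it when prefixed with "no"; unknown options return -EINVAL:
 *
 *        # echo sym-offset > /debug/tracing/iter_ctrl
 *        # echo nosym-offset > /debug/tracing/iter_ctrl
 */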
1947
1948 static struct file_operations tracing_iter_fops = {
1949         .open           = tracing_open_generic,
1950         .read           = tracing_iter_ctrl_read,
1951         .write          = tracing_iter_ctrl_write,
1952 };
1953
1954 static const char readme_msg[] =
1955         "tracing mini-HOWTO:\n\n"
1956         "# mkdir /debug\n"
1957         "# mount -t debugfs nodev /debug\n\n"
1958         "# cat /debug/tracing/available_tracers\n"
1959         "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
1960         "# cat /debug/tracing/current_tracer\n"
1961         "none\n"
1962         "# echo sched_switch > /debug/tracing/current_tracer\n"
1963         "# cat /debug/tracing/current_tracer\n"
1964         "sched_switch\n"
1965         "# cat /debug/tracing/iter_ctrl\n"
1966         "noprint-parent nosym-offset nosym-addr noverbose\n"
1967         "# echo print-parent > /debug/tracing/iter_ctrl\n"
1968         "# echo 1 > /debug/tracing/tracing_enabled\n"
1969         "# cat /debug/tracing/trace > /tmp/trace.txt\n"
1970         "# echo 0 > /debug/tracing/tracing_enabled\n"
1971 ;
1972
1973 static ssize_t
1974 tracing_readme_read(struct file *filp, char __user *ubuf,
1975                        size_t cnt, loff_t *ppos)
1976 {
1977         return simple_read_from_buffer(ubuf, cnt, ppos,
1978                                         readme_msg, strlen(readme_msg));
1979 }
1980
1981 static struct file_operations tracing_readme_fops = {
1982         .open           = tracing_open_generic,
1983         .read           = tracing_readme_read,
1984 };
1985
1986 static ssize_t
1987 tracing_ctrl_read(struct file *filp, char __user *ubuf,
1988                   size_t cnt, loff_t *ppos)
1989 {
1990         struct trace_array *tr = filp->private_data;
1991         char buf[64];
1992         int r;
1993
1994         r = sprintf(buf, "%ld\n", tr->ctrl);
1995         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1996 }
1997
1998 static ssize_t
1999 tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2000                    size_t cnt, loff_t *ppos)
2001 {
2002         struct trace_array *tr = filp->private_data;
2003         long val;
2004         char buf[64];
2005
2006         if (cnt > 63)
2007                 cnt = 63;
2008
2009         if (copy_from_user(&buf, ubuf, cnt))
2010                 return -EFAULT;
2011
2012         buf[cnt] = 0;
2013
2014         val = simple_strtoul(buf, NULL, 10);
2015
2016         val = !!val;
2017
2018         mutex_lock(&trace_types_lock);
2019         if (tr->ctrl ^ val) {
2020                 if (val)
2021                         tracer_enabled = 1;
2022                 else
2023                         tracer_enabled = 0;
2024
2025                 tr->ctrl = val;
2026
2027                 if (current_trace && current_trace->ctrl_update)
2028                         current_trace->ctrl_update(tr);
2029         }
2030         mutex_unlock(&trace_types_lock);
2031
2032         filp->f_pos += cnt;
2033
2034         return cnt;
2035 }
2036
2037 static ssize_t
2038 tracing_set_trace_read(struct file *filp, char __user *ubuf,
2039                        size_t cnt, loff_t *ppos)
2040 {
2041         char buf[max_tracer_type_len+2];
2042         int r;
2043
2044         mutex_lock(&trace_types_lock);
2045         if (current_trace)
2046                 r = sprintf(buf, "%s\n", current_trace->name);
2047         else
2048                 r = sprintf(buf, "\n");
2049         mutex_unlock(&trace_types_lock);
2050
2051         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2052 }
2053
2054 static ssize_t
2055 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2056                         size_t cnt, loff_t *ppos)
2057 {
2058         struct trace_array *tr = &global_trace;
2059         struct tracer *t;
2060         char buf[max_tracer_type_len+1];
2061         int i;
2062
2063         if (cnt > max_tracer_type_len)
2064                 cnt = max_tracer_type_len;
2065
2066         if (copy_from_user(&buf, ubuf, cnt))
2067                 return -EFAULT;
2068
2069         buf[cnt] = 0;
2070
2071         /* strip trailing whitespace. */
2072         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
2073                 buf[i] = 0;
2074
2075         mutex_lock(&trace_types_lock);
2076         for (t = trace_types; t; t = t->next) {
2077                 if (strcmp(t->name, buf) == 0)
2078                         break;
2079         }
2080         if (!t || t == current_trace)
2081                 goto out;
2082
2083         if (current_trace && current_trace->reset)
2084                 current_trace->reset(tr);
2085
2086         current_trace = t;
2087         if (t->init)
2088                 t->init(tr);
2089
2090  out:
2091         mutex_unlock(&trace_types_lock);
2092
2093         filp->f_pos += cnt;
2094
2095         return cnt;
2096 }
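/*
 * Note: switching tracers here also makes a reader blocked in
 * tracing_read_pipe() return EOF; that loop re-checks iter->trace
 * against current_trace after every sleep.
 */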
2097
2098 static ssize_t
2099 tracing_max_lat_read(struct file *filp, char __user *ubuf,
2100                      size_t cnt, loff_t *ppos)
2101 {
2102         unsigned long *ptr = filp->private_data;
2103         char buf[64];
2104         int r;
2105
2106         r = snprintf(buf, 64, "%ld\n",
2107                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
2108         if (r > 64)
2109                 r = 64;
2110         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2111 }
2112
2113 static ssize_t
2114 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
2115                       size_t cnt, loff_t *ppos)
2116 {
2117         long *ptr = filp->private_data;
2118         long val;
2119         char buf[64];
2120
2121         if (cnt > 63)
2122                 cnt = 63;
2123
2124         if (copy_from_user(&buf, ubuf, cnt))
2125                 return -EFAULT;
2126
2127         buf[cnt] = 0;
2128
2129         val = simple_strtoul(buf, NULL, 10);
2130
2131         *ptr = val * 1000;
2132
2133         return cnt;
2134 }
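/*
 * Note on units: values written here are taken as microseconds and
 * stored as nanoseconds (hence the multiply by 1000), and
 * tracing_max_lat_read() converts back with nsecs_to_usecs(). E.g.
 * writing "250" stores 250000 ns.
 */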
2135
2136 static atomic_t tracing_reader;
2137
2138 static int tracing_open_pipe(struct inode *inode, struct file *filp)
2139 {
2140         struct trace_iterator *iter;
2141
2142         if (tracing_disabled)
2143                 return -ENODEV;
2144
2145         /* We allow only one reader of the pipe */
2146         if (atomic_inc_return(&tracing_reader) != 1) {
2147                 atomic_dec(&tracing_reader);
2148                 return -EBUSY;
2149         }
2150
2151         /* create a buffer to store the information to pass to userspace */
2152         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2153         if (!iter)
2154                 return -ENOMEM;
2155
2156         iter->tr = &global_trace;
2157         iter->trace = current_trace;
2158
2159         filp->private_data = iter;
2160
2161         return 0;
2162 }
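/*
 * Usage sketch: trace_pipe is a blocking, consuming reader, and at
 * most one process may hold it open at a time (enforced through
 * tracing_reader above); a second concurrent open fails with -EBUSY:
 *
 *        # cat /debug/tracing/trace_pipe
 */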
2163
2164 static int tracing_release_pipe(struct inode *inode, struct file *file)
2165 {
2166         struct trace_iterator *iter = file->private_data;
2167
2168         kfree(iter);
2169         atomic_dec(&tracing_reader);
2170
2171         return 0;
2172 }
2173
2174 static unsigned int
2175 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
2176 {
2177         struct trace_iterator *iter = filp->private_data;
2178
2179         if (trace_flags & TRACE_ITER_BLOCK) {
2180                 /*
2181                  * Always select as readable when in blocking mode
2182                  */
2183                 return POLLIN | POLLRDNORM;
2184         } else {
2186                 if (!trace_empty(iter))
2187                         return POLLIN | POLLRDNORM;
2188                 poll_wait(filp, &trace_wait, poll_table);
2189                 if (!trace_empty(iter))
2190                         return POLLIN | POLLRDNORM;
2191
2192                 return 0;
2193         }
2194 }
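/*
 * With the "block" option set, poll()/select() always report the pipe
 * as readable and tracing_read_pipe() does the waiting itself;
 * otherwise we sleep on trace_wait until new entries arrive.
 */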
2195
2196 /*
2197  * Consumer reader.
2198  */
2199 static ssize_t
2200 tracing_read_pipe(struct file *filp, char __user *ubuf,
2201                   size_t cnt, loff_t *ppos)
2202 {
2203         struct trace_iterator *iter = filp->private_data;
2204         struct trace_array_cpu *data;
2205         struct trace_array *tr = iter->tr;
2206         struct tracer *tracer = iter->trace;
2207         static cpumask_t mask;  /* CPUs whose buffers we disable below */
2208         static int start;       /* resume offset into iter->seq (single reader) */
2209         unsigned long flags;
2210 #ifdef CONFIG_FTRACE
2211         int ftrace_save;
2212 #endif
2213         int read = 0;
2214         int cpu;
2215         int len;
2216         int ret;
2217
2218         /* return any leftover data */
2219         if (iter->seq.len > start) {
2220                 len = iter->seq.len - start;
2221                 if (cnt > len)
2222                         cnt = len;
2223                 ret = copy_to_user(ubuf, iter->seq.buffer + start, cnt);
2224                 if (ret)
2225                         cnt = -EFAULT;
2226                 else
2227                         start += cnt; /* advance only past what was copied */
2228
2229                 return cnt;
2230         }
2231
2232         trace_seq_reset(&iter->seq);
2233         start = 0;
2234
2235         while (trace_empty(iter)) {
2236                 /*
2237                  * This is a makeshift waitqueue; we don't use an actual
2238                  * wait queue because:
2239                  *  1) we only ever have one waiter, and
2240                  *  2) the tracer traces all functions, and we don't want
2241                  *     the overhead of calling wake_up and friends
2242                  *     (and of tracing them too).
2243                  *     Anyway, this really is a very primitive wakeup.
2244                  */
2245                 set_current_state(TASK_INTERRUPTIBLE);
2246                 iter->tr->waiter = current;
2247
2248                 /* sleep for one second, and try again. */
2249                 schedule_timeout(HZ);
2250
2251                 iter->tr->waiter = NULL;
2252
2253                 if (signal_pending(current))
2254                         return -EINTR;
2255
2256                 if (iter->trace != current_trace)
2257                         return 0;
2258
2259                 /*
2260                  * We block while tracing is enabled. We also keep blocking
2261                  * when tracing is disabled but nothing has been read yet:
2262                  * this allows a user to cat this file and then enable
2263                  * tracing. But once we have read something, we return EOF
2264                  * when tracing is disabled again.
2265                  *
2266                  * iter->pos will be 0 if we haven't read anything.
2267                  */
2268                 if (!tracer_enabled && iter->pos)
2269                         break;
2270
2271                 continue;
2272         }
2273
2274         /* stop when tracing is finished */
2275         if (trace_empty(iter))
2276                 return 0;
2277
2278         if (cnt >= PAGE_SIZE)
2279                 cnt = PAGE_SIZE - 1;
2280
2281         memset(iter, 0, sizeof(*iter));
2282         iter->tr = tr;
2283         iter->trace = tracer;
2284         iter->pos = -1;
2285
2286         /*
2287          * We need to stop all tracing on all CPUs to read
2288          * the next buffer. This is a bit expensive, but is
2289          * not done often. We fill all that we can read,
2290          * and then release the locks again.
2291          */
2292
2293         cpus_clear(mask);
2294         local_irq_save(flags);
2295 #ifdef CONFIG_FTRACE
2296         ftrace_save = ftrace_enabled;
2297         ftrace_enabled = 0;
2298 #endif
2299         smp_wmb();
2300         for_each_possible_cpu(cpu) {
2301                 data = iter->tr->data[cpu];
2302
2303                 if (!head_page(data) || !data->trace_idx)
2304                         continue;
2305
2306                 atomic_inc(&data->disabled);
2307                 cpu_set(cpu, mask);
2308         }
2309
2310         for_each_cpu_mask(cpu, mask) {
2311                 data = iter->tr->data[cpu];
2312                 __raw_spin_lock(&data->lock);
2313         }
2314
2315         while (find_next_entry_inc(iter) != NULL) {
2316                 int len = iter->seq.len;
2317
2318                 ret = print_trace_line(iter);
2319                 if (!ret) {
2320                         /* don't print partial lines */
2321                         iter->seq.len = len;
2322                         break;
2323                 }
2324
2325                 trace_consume(iter);
2326
2327                 if (iter->seq.len >= cnt)
2328                         break;
2329         }
2330
2331         for_each_cpu_mask(cpu, mask) {
2332                 data = iter->tr->data[cpu];
2333                 __raw_spin_unlock(&data->lock);
2334         }
2335
2336         for_each_cpu_mask(cpu, mask) {
2337                 data = iter->tr->data[cpu];
2338                 atomic_dec(&data->disabled);
2339         }
2340 #ifdef CONFIG_FTRACE
2341         ftrace_enabled = ftrace_save;
2342 #endif
2343         local_irq_restore(flags);
2344
2345         /* Now copy what we have to the user */
2346         read = iter->seq.len;
2347         if (read > cnt)
2348                 read = cnt;
2349
2350         ret = copy_to_user(ubuf, iter->seq.buffer, read);
2351
2352         if (read < iter->seq.len)
2353                 start = read;
2354         else
2355                 trace_seq_reset(&iter->seq);
2356
2357         if (ret)
2358                 read = -EFAULT;
2359
2360         return read;
2361 }
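/*
 * A rough sketch of the consuming read above:
 *  1. return any formatted text left over from the previous call;
 *  2. nap in one-second intervals until the buffers are non-empty,
 *     honouring signals, tracer switches and the EOF-when-disabled
 *     rule;
 *  3. with tracing and the per-cpu buffers disabled, format and
 *     consume entries until about cnt bytes are ready;
 *  4. copy the text to user-space, remembering any overflow in
 *     'start' for the next call.
 */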
2362
2363 static struct file_operations tracing_max_lat_fops = {
2364         .open           = tracing_open_generic,
2365         .read           = tracing_max_lat_read,
2366         .write          = tracing_max_lat_write,
2367 };
2368
2369 static struct file_operations tracing_ctrl_fops = {
2370         .open           = tracing_open_generic,
2371         .read           = tracing_ctrl_read,
2372         .write          = tracing_ctrl_write,
2373 };
2374
2375 static struct file_operations set_tracer_fops = {
2376         .open           = tracing_open_generic,
2377         .read           = tracing_set_trace_read,
2378         .write          = tracing_set_trace_write,
2379 };
2380
2381 static struct file_operations tracing_pipe_fops = {
2382         .open           = tracing_open_pipe,
2383         .poll           = tracing_poll_pipe,
2384         .read           = tracing_read_pipe,
2385         .release        = tracing_release_pipe,
2386 };
2387
2388 #ifdef CONFIG_DYNAMIC_FTRACE
2389
2390 static ssize_t
2391 tracing_read_long(struct file *filp, char __user *ubuf,
2392                   size_t cnt, loff_t *ppos)
2393 {
2394         unsigned long *p = filp->private_data;
2395         char buf[64];
2396         int r;
2397
2398         r = sprintf(buf, "%ld\n", *p);
2399
2400         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2401 }
2402
2403 static struct file_operations tracing_read_long_fops = {
2404         .open           = tracing_open_generic,
2405         .read           = tracing_read_long,
2406 };
2407 #endif
2408
2409 static struct dentry *d_tracer;
2410
2411 struct dentry *tracing_init_dentry(void)
2412 {
2413         static int once;
2414
2415         if (d_tracer)
2416                 return d_tracer;
2417
2418         d_tracer = debugfs_create_dir("tracing", NULL);
2419
2420         if (!d_tracer && !once) {
2421                 once = 1;
2422                 pr_warning("Could not create debugfs directory 'tracing'\n");
2423                 return NULL;
2424         }
2425
2426         return d_tracer;
2427 }
2428
2429 #ifdef CONFIG_FTRACE_SELFTEST
2430 /* Let selftest have access to static functions in this file */
2431 #include "trace_selftest.c"
2432 #endif
2433
2434 static __init void tracer_init_debugfs(void)
2435 {
2436         struct dentry *d_tracer;
2437         struct dentry *entry;
2438
2439         d_tracer = tracing_init_dentry();
2440
2441         entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
2442                                     &global_trace, &tracing_ctrl_fops);
2443         if (!entry)
2444                 pr_warning("Could not create debugfs 'tracing_enabled' entry\n");
2445
2446         entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
2447                                     NULL, &tracing_iter_fops);
2448         if (!entry)
2449                 pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
2450
2451         entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
2452                                     NULL, &tracing_cpumask_fops);
2453         if (!entry)
2454                 pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
2455
2456         entry = debugfs_create_file("latency_trace", 0444, d_tracer,
2457                                     &global_trace, &tracing_lt_fops);
2458         if (!entry)
2459                 pr_warning("Could not create debugfs 'latency_trace' entry\n");
2460
2461         entry = debugfs_create_file("trace", 0444, d_tracer,
2462                                     &global_trace, &tracing_fops);
2463         if (!entry)
2464                 pr_warning("Could not create debugfs 'trace' entry\n");
2465
2466         entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2467                                     &global_trace, &show_traces_fops);
2468         if (!entry)
2469                 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2470
2471         entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2472                                     &global_trace, &set_tracer_fops);
2473         if (!entry)
2474                 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2475
2476         entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2477                                     &tracing_max_latency,
2478                                     &tracing_max_lat_fops);
2479         if (!entry)
2480                 pr_warning("Could not create debugfs "
2481                            "'tracing_max_latency' entry\n");
2482
2483         entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
2484                                     &tracing_thresh, &tracing_max_lat_fops);
2485         if (!entry)
2486                 pr_warning("Could not create debugfs "
2487                            "'tracing_thresh' entry\n");
2488         entry = debugfs_create_file("README", 0644, d_tracer,
2489                                     NULL, &tracing_readme_fops);
2490         if (!entry)
2491                 pr_warning("Could not create debugfs 'README' entry\n");
2492
2493         entry = debugfs_create_file("trace_pipe", 0644, d_tracer,
2494                                     NULL, &tracing_pipe_fops);
2495         if (!entry)
2496                 pr_warning("Could not create debugfs "
2497                            "'trace_pipe' entry\n");
2498
2499 #ifdef CONFIG_DYNAMIC_FTRACE
2500         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
2501                                     &ftrace_update_tot_cnt,
2502                                     &tracing_read_long_fops);
2503         if (!entry)
2504                 pr_warning("Could not create debugfs "
2505                            "'dyn_ftrace_total_info' entry\n");
2506 #endif
2507 }
2508
2509 /* dummy trace to disable tracing */
2510 static struct tracer no_tracer __read_mostly =
2511 {
2512         .name           = "none",
2513 };
2514
2515 static int trace_alloc_page(void)
2516 {
2517         struct trace_array_cpu *data;
2518         struct page *page, *tmp;
2519         LIST_HEAD(pages);
2520         void *array;
2521         int i;
2522
2523         /* first allocate a page for each CPU */
2524         for_each_possible_cpu(i) {
2525                 array = (void *)__get_free_page(GFP_KERNEL);
2526                 if (array == NULL) {
2527                         printk(KERN_ERR "tracer: failed to allocate page "
2528                                "for trace buffer!\n");
2529                         goto free_pages;
2530                 }
2531
2532                 page = virt_to_page(array);
2533                 list_add(&page->lru, &pages);
2534
2535 /* Only allocate if we are actually using the max trace */
2536 #ifdef CONFIG_TRACER_MAX_TRACE
2537                 array = (void *)__get_free_page(GFP_KERNEL);
2538                 if (array == NULL) {
2539                         printk(KERN_ERR "tracer: failed to allocate page "
2540                                "for trace buffer!\n");
2541                         goto free_pages;
2542                 }
2543                 page = virt_to_page(array);
2544                 list_add(&page->lru, &pages);
2545 #endif
2546         }
2547
2548         /* Now that we have successfully allocated a page per CPU, add them */
2549         for_each_possible_cpu(i) {
2550                 data = global_trace.data[i];
2551                 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
2552                 page = list_entry(pages.next, struct page, lru);
2553                 list_del_init(&page->lru);
2554                 list_add_tail(&page->lru, &data->trace_pages);
2555                 ClearPageLRU(page);
2556
2557 #ifdef CONFIG_TRACER_MAX_TRACE
2558                 data = max_tr.data[i];
2559                 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
2560                 page = list_entry(pages.next, struct page, lru);
2561                 list_del_init(&page->lru);
2562                 list_add_tail(&page->lru, &data->trace_pages);
2563                 SetPageLRU(page);
2564 #endif
2565         }
2566         global_trace.entries += ENTRIES_PER_PAGE;
2567
2568         return 0;
2569
2570  free_pages:
2571         list_for_each_entry_safe(page, tmp, &pages, lru) {
2572                 list_del_init(&page->lru);
2573                 __free_page(page);
2574         }
2575         return -ENOMEM;
2576 }
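/*
 * Note: the PG_lru page flag is reused above purely as a tag for which
 * array a page belongs to: cleared for global_trace pages, set for
 * max_tr pages (see the "use the LRU flag" comment in
 * tracer_alloc_buffers() below). These pages come from
 * __get_free_page() and never sit on the page-reclaim LRU lists, so
 * the bit is otherwise unused here.
 */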
2577
2578 __init static int tracer_alloc_buffers(void)
2579 {
2580         struct trace_array_cpu *data;
2581         void *array;
2582         struct page *page;
2583         int pages = 0;
2584         int ret = -ENOMEM;
2585         int i;
2586
2587         global_trace.ctrl = tracer_enabled;
2588
2589         /* Allocate the first page for all buffers */
2590         for_each_possible_cpu(i) {
2591                 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
2592                 max_tr.data[i] = &per_cpu(max_data, i);
2593
2594                 array = (void *)__get_free_page(GFP_KERNEL);
2595                 if (array == NULL) {
2596                         printk(KERN_ERR "tracer: failed to allocate page "
2597                                "for trace buffer!\n");
2598                         goto free_buffers;
2599                 }
2600
2601                 /* link this page into the cpu buffer's page list */
2602                 INIT_LIST_HEAD(&data->trace_pages);
2603                 page = virt_to_page(array);
2604                 list_add(&page->lru, &data->trace_pages);
2605                 /* use the LRU flag to differentiate the two buffers */
2606                 ClearPageLRU(page);
2607
2608 /* Only allocate if we are actually using the max trace */
2609 #ifdef CONFIG_TRACER_MAX_TRACE
2610                 array = (void *)__get_free_page(GFP_KERNEL);
2611                 if (array == NULL) {
2612                         printk(KERN_ERR "tracer: failed to allocate page "
2613                                "for trace buffer!\n");
2614                         goto free_buffers;
2615                 }
2616
2617                 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
2618                 page = virt_to_page(array);
2619                 list_add(&page->lru, &max_tr.data[i]->trace_pages);
2620                 SetPageLRU(page);
2621 #endif
2622         }
2623
2624         /*
2625          * Since we allocate by orders of pages, we may be able to
2626          * round up a bit.
2627          */
2628         global_trace.entries = ENTRIES_PER_PAGE;
2629         pages++;
2630
2631         while (global_trace.entries < trace_nr_entries) {
2632                 if (trace_alloc_page())
2633                         break;
2634                 pages++;
2635         }
2636         max_tr.entries = global_trace.entries;
2637
2638         pr_info("tracer: %d pages allocated for %ld",
2639                 pages, trace_nr_entries);
2640         pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
2641         pr_info("   actual entries %ld\n", global_trace.entries);
2642
2643         tracer_init_debugfs();
2644
2645         trace_init_cmdlines();
2646
2647         register_tracer(&no_tracer);
2648         current_trace = &no_tracer;
2649
2650         /* All seems OK, enable tracing */
2651         tracing_disabled = 0;
2652
2653         return 0;
2654
2655  free_buffers:
2656         for (i-- ; i >= 0; i--) {
2657                 struct page *page, *tmp;
2658                 struct trace_array_cpu *data = global_trace.data[i];
2659
2660                 if (data) {
2661                         list_for_each_entry_safe(page, tmp,
2662                                                  &data->trace_pages, lru) {
2663                                 list_del_init(&page->lru);
2664                                 __free_page(page);
2665                         }
2666                 }
2667
2668 #ifdef CONFIG_TRACER_MAX_TRACE
2669                 data = max_tr.data[i];
2670                 if (data) {
2671                         list_for_each_entry_safe(page, tmp,
2672                                                  &data->trace_pages, lru) {
2673                                 list_del_init(&page->lru);
2674                                 __free_page(page);
2675                         }
2676                 }
2677 #endif
2678         }
2679         return ret;
2680 }
2681 fs_initcall(tracer_alloc_buffers);
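/*
 * Sizing sketch with illustrative numbers (sizeof(struct trace_entry)
 * is configuration dependent): with 4 KB pages and a hypothetical
 * 64-byte entry, ENTRIES_PER_PAGE is 4096 / 64 = 64, so the default
 * request of 65536 entries needs 65536 / 64 = 1024 pages per CPU,
 * doubled when CONFIG_TRACER_MAX_TRACE also populates max_tr.
 */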