ftrace: remove ad-hoc code
[linux-2.6] kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;

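/*
 * Probe body for the context-switch hook: record the prev -> next
 * switch in this CPU's trace buffer.  The per-CPU ->disabled counter
 * acts as a recursion guard: the entry is written only if we are the
 * first to bump the counter (0 -> 1).
 */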
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(prev);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

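/*
 * Probe body for the wakeup hook: same locking and recursion-guard
 * pattern as ctx_switch_func(), but records @curr waking up @wakee.
 */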
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

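/*
 * Entry point called by the scheduler core on every context switch;
 * fans the event out to the local probe above and to the wakeup
 * latency tracer.
 */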
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
                  struct task_struct *next)
{
        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        ctx_switch_func(__rq, prev, next);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_switch(prev, next);
}

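/*
 * Entry point called when a task is woken; mirrors ftrace_ctx_switch()
 * for wakeup events.
 */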
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
                    struct task_struct *curr)
{
        wakeup_func(__rq, wakee, curr);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_wakeup(wakee, curr);
}

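/*
 * Log three arbitrary caller-supplied values as a "special" trace
 * entry; callable from anywhere in the kernel for one-off debugging.
 */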
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

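/* Restamp the trace start time and clear every online CPU's buffer. */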
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

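/* Reset the buffers first so a new trace never mixes with stale events. */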
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
}

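/*
 * Hooks wired into the struct tracer below: init/reset run when this
 * tracer is selected or deselected, ctrl_update when tracing is
 * switched on or off.
 */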
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

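/*
 * The tracer itself.  Once registered it is selected at run time via
 * the tracing debugfs files (e.g. echo sched_switch into
 * <debugfs>/tracing/current_tracer, assuming the usual layout).
 */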
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);