ftrace: move ftrace_special to trace.c

kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static atomic_t                 sched_ref;

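/*
 * Record one context switch into the trace buffer. Runs with
 * interrupts disabled and uses the per-CPU "disabled" counter as a
 * recursion guard: the event is written only when this is the sole
 * active trace path on this CPU.
 */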
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

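/*
 * Marker probe for kernel_sched_schedule. The marker hands over its
 * arguments as a va_list matching the format string, so skip the
 * leading pid/state fields and pull out the rq and task pointers.
 */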
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
                      const char *format, va_list *args)
{
        struct task_struct *prev;
        struct task_struct *next;
        struct rq *__rq;

        if (!atomic_read(&sched_ref))
                return;

        /* skip prev_pid %d next_pid %d prev_state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        __rq = va_arg(*args, typeof(__rq));
        prev = va_arg(*args, typeof(prev));
        next = va_arg(*args, typeof(next));

        tracing_record_cmdline(prev);

        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        sched_switch_func(probe_data, __rq, prev, next);
}

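/*
 * Record a wakeup event: "wakee" was just woken while "curr" runs.
 * Same per-CPU recursion guard as sched_switch_func().
 */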
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
                        struct task_struct *curr)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

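/*
 * Marker probe for kernel_sched_wakeup and kernel_sched_wakeup_new;
 * unpacks the va_list the same way sched_switch_callback() does.
 */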
static notrace void
wake_up_callback(void *probe_data, void *call_data,
                 const char *format, va_list *args)
{
        struct task_struct *curr;
        struct task_struct *task;
        struct rq *__rq;

        if (likely(!tracer_enabled))
                return;

        /* Skip pid %d state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        /* now get the meat: "rq %p task %p rq->curr %p" */
        __rq = va_arg(*args, typeof(__rq));
        task = va_arg(*args, typeof(task));
        curr = va_arg(*args, typeof(curr));

        tracing_record_cmdline(task);
        tracing_record_cmdline(curr);

        wakeup_func(probe_data, __rq, task, curr);
}

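/* Restart the trace clock and clear the per-CPU trace buffers. */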
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

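/*
 * Attach the three marker probes. On failure, unregister whatever
 * was already attached so no probe is left behind.
 */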
static int tracing_sched_register(void)
{
        int ret;

        ret = marker_probe_register("kernel_sched_wakeup",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = marker_probe_register("kernel_sched_wakeup_new",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = marker_probe_register("kernel_sched_schedule",
                "prev_pid %d next_pid %d prev_state %ld "
                "## rq %p prev %p next %p",
                sched_switch_callback,
                &ctx_trace);
        if (ret) {
                pr_info("sched trace: Couldn't add marker"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
fail_deprobe:
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
        return ret;
}

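/* Detach the marker probes, in reverse order of registration. */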
static void tracing_sched_unregister(void)
{
        marker_probe_unregister("kernel_sched_schedule",
                                sched_switch_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
}

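/*
 * Reference-counted enabling of the probes, so multiple tracers can
 * share the scheduler events: register on the 0 -> 1 transition,
 * unregister when the count drops back to zero.
 */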
void tracing_start_sched_switch(void)
{
        long ref;

        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
        long ref;

        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
}

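/* Tracer start/stop: reset the buffers, then flip the enable flags. */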
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
        tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch();
        atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
}

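/*
 * Tracer core callbacks: init/reset run when this tracer is selected
 * or deselected, ctrl_update when tracing is switched on or off.
 */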
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

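/* The sched_switch tracer plugin itself. */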
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

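/*
 * If someone (e.g. another tracer) already holds a sched_ref by the
 * time this initcall runs, attach the probes now, then register the
 * plugin with the tracing core.
 */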
__init static int init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
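
/*
 * A usage sketch for driving this tracer through the tracing debugfs
 * files. The paths assume debugfs is mounted at /sys/kernel/debug,
 * which is the conventional mount point but an assumption here:
 *
 *   echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *   cat /sys/kernel/debug/tracing/trace
 */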