/*
 * kernel/trace/trace_sched_switch.c
 *
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array       *ctx_trace;
static int __read_mostly        tracer_enabled;
static atomic_t                 sched_ref;

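/*
 * Probe body for the context-switch marker.  It always records the
 * pid<->comm mapping for both tasks, then, if the tracer is enabled,
 * writes a switch entry into this CPU's buffer.  The per-cpu "disabled"
 * counter is a recursion guard: if tracing code re-enters this path, the
 * nested call sees disabled > 1 and bails out.  Interrupts are disabled
 * so the task cannot migrate between reading the CPU id and touching the
 * per-cpu data.
 */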
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

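/*
 * Marker callback for kernel_sched_schedule.  The arguments arrive as a
 * va_list laid out exactly like the format string passed to
 * marker_probe_register() below, so the integer and long fields before
 * the "##" separator must be consumed (and discarded) in order before
 * the pointers can be pulled out.
 */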
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
                      const char *format, va_list *args)
{
        struct task_struct *prev;
        struct task_struct *next;
        struct rq *__rq;

        if (!atomic_read(&sched_ref))
                return;

        /* skip prev_pid %d next_pid %d prev_state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        __rq = va_arg(*args, typeof(__rq));
        prev = va_arg(*args, typeof(prev));
        next = va_arg(*args, typeof(next));

        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        sched_switch_func(probe_data, __rq, prev, next);
}

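/*
 * wakeup_func() mirrors sched_switch_func() for task wakeups: same
 * tracer_enabled check and per-cpu recursion guard, but it logs a
 * wakeup entry (the woken task plus the currently running task)
 * instead of a switch entry.
 */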
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
                        struct task_struct *curr)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

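/*
 * Marker callback for kernel_sched_wakeup and kernel_sched_wakeup_new.
 * As above, the va_list is consumed in the order given by the format
 * string: skip "pid %d state %ld", then read the three pointers.
 */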
static notrace void
wake_up_callback(void *probe_data, void *call_data,
                 const char *format, va_list *args)
{
        struct task_struct *curr;
        struct task_struct *task;
        struct rq *__rq;

        if (likely(!tracer_enabled))
                return;

        /* Skip pid %d state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        /* now get the meat: "rq %p task %p rq->curr %p" */
        __rq = va_arg(*args, typeof(__rq));
        task = va_arg(*args, typeof(task));
        curr = va_arg(*args, typeof(curr));

        tracing_record_cmdline(task);
        tracing_record_cmdline(curr);

        wakeup_func(probe_data, __rq, task, curr);
}

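/*
 * Drop everything recorded so far: stamp a new start time and clear
 * each online CPU's buffer.
 */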
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}

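/*
 * Hook the three scheduler markers.  The format strings here must match
 * the trace_mark() sites in the scheduler, or marker_probe_register()
 * will reject the probe.  As a sketch (the argument names are
 * illustrative, not copied from sched.c), a matching marker site looks
 * like:
 *
 *      trace_mark(kernel_sched_wakeup,
 *                 "pid %d state %ld ## rq %p task %p rq->curr %p",
 *                 p->pid, p->state, rq, p, rq->curr);
 *
 * Registration is unwound in reverse on failure, so a partial setup
 * never leaves a stray probe behind.
 */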
static int tracing_sched_register(void)
{
        int ret;

        ret = marker_probe_register("kernel_sched_wakeup",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = marker_probe_register("kernel_sched_wakeup_new",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = marker_probe_register("kernel_sched_schedule",
                "prev_pid %d next_pid %d prev_state %ld "
                "## rq %p prev %p next %p",
                sched_switch_callback,
                &ctx_trace);
        if (ret) {
                pr_info("sched trace: Couldn't add marker"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
fail_deprobe:
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
        return ret;
}

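/* Remove all three probes, in reverse order of registration. */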
static void tracing_sched_unregister(void)
{
        marker_probe_unregister("kernel_sched_schedule",
                                sched_switch_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
}

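/*
 * sched_ref counts how many users want the probes installed (this
 * tracer, plus anyone recording cmdlines).  The first reference
 * registers the probes; dropping the last one unregisters them.
 */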
static void tracing_start_sched_switch(void)
{
        long ref;

        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
}

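/*
 * atomic_dec_and_test() returns true only when the counter hits zero,
 * i.e. when the last user goes away.
 */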
static void tracing_stop_sched_switch(void)
{
        long ref;

        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
}

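/*
 * Entry points for other tracers: taking a cmdline-record reference
 * keeps the probes installed so tracing_record_cmdline() keeps the
 * pid -> comm mapping fresh, even while the sched_switch tracer itself
 * is not active.
 */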
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_cmdline_record();
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
        tracing_stop_cmdline_record();
}

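/*
 * Tracer callbacks: init/reset run when this tracer is selected or
 * deselected, ctrl_update when tracing is switched on or off while it
 * is current.  tr->ctrl carries the on/off state in each case.
 */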
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

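/*
 * Typical usage from userspace (a sketch, assuming debugfs is mounted
 * on /debug):
 *
 *      echo sched_switch > /debug/tracing/current_tracer
 *      echo 1 > /debug/tracing/tracing_enabled
 *      cat /debug/tracing/trace
 */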
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

static int __init init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);