kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

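/*
 * Shared state for the sched_switch tracer:
 * ctx_trace is the trace_array that switch/wakeup events are written to,
 * tracer_enabled counts tracers that want events recorded, sched_ref
 * counts users that need the tracepoint probes registered (cmdline
 * recording needs only the probes), and sched_stopped is set while
 * tracing is stopped.
 */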
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int			sched_stopped;

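/*
 * Probe attached to the sched_switch tracepoint: record the comms of
 * both tasks and, if the tracer is enabled, write a context-switch
 * entry into the per-cpu buffer of ctx_trace.
 */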
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}

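/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints:
 * record the waker's comm and, if the tracer is enabled, write a wakeup
 * entry (wakee woken by current) into the per-cpu buffer.
 */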
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

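/*
 * Attach the probes to the scheduler tracepoints.  If a later
 * registration fails, unregister the ones that already succeeded.
 */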
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

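/*
 * Reference-counted probe registration: the probes are attached when
 * the first user arrives and detached when the last user goes away.
 */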
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

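/*
 * Command line (comm) recording only needs the probes registered so
 * that tracing_record_cmdline() gets called; tracer_enabled is left
 * untouched, so no switch/wakeup events are written on its behalf.
 */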
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}

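/*
 * Tracer callbacks: init/reset run when the sched_switch tracer is
 * selected or deselected; start/stop only toggle recording via
 * sched_stopped and leave the probes registered.
 */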
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}

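/* The "sched_switch" plugin tracer, selectable through available_tracers. */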
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);