/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

/* trace array used to record the context switch and wakeup events */
static struct trace_array       *ctx_trace;
/* number of tracers currently recording into ctx_trace */
static int __read_mostly        tracer_enabled;
/* number of users of the tracepoint probes (cmdline recording included) */
static int                      sched_ref;
static DEFINE_MUTEX(sched_register_mutex);

/*
 * Probe attached to the sched_switch tracepoint: records the switching
 * tasks' cmdlines and, if a tracer is recording, the context switch
 * event into the current CPU's trace buffer.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                        struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (!sched_ref)
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

        local_irq_restore(flags);
}

/*
 * Probe attached to the sched_wakeup/sched_wakeup_new tracepoints:
 * records the wakeup event into the current CPU's trace buffer.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (unlikely(!tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}

static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}

static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}

/*
 * The tracepoint probes are registered by the first user and removed
 * again by the last one; sched_register_mutex protects the refcount.
 */
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}

void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

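/*
 * Example: a tracer that only needs PID -> comm resolution in its
 * output (not the switch/wakeup events themselves) can wrap the
 * cmdline record helpers above in its own callbacks.  The tracer
 * callbacks below are hypothetical, shown for illustration only:
 *
 *        static int foo_trace_init(struct trace_array *tr)
 *        {
 *                tracing_start_cmdline_record();
 *                return 0;
 *        }
 *
 *        static void foo_trace_reset(struct trace_array *tr)
 *        {
 *                tracing_stop_cmdline_record();
 *        }
 */
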
/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

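/*
 * Example: a tracer that wants the switch/wakeup events interleaved
 * with its own trace pairs the two helpers above from its start/stop
 * callbacks.  The callbacks below are hypothetical, shown for
 * illustration only:
 *
 *        static void foo_tracer_start(struct trace_array *tr)
 *        {
 *                tracing_start_sched_switch_record();
 *        }
 *
 *        static void foo_tracer_stop(struct trace_array *tr)
 *        {
 *                tracing_stop_sched_switch_record();
 *        }
 */
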
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}

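/*
 * Example: the trace array must be assigned before recording is
 * started (tracing_start_sched_switch_record() warns otherwise),
 * typically from the tracer's init callback.  The tracer below is
 * hypothetical, shown for illustration only:
 *
 *        static int foo_tracer_init(struct trace_array *tr)
 *        {
 *                tracing_sched_switch_assign_trace(tr);
 *                tracing_start_sched_switch_record();
 *                return 0;
 *        }
 */
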
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_sched_switch_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}

static int sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;
        start_sched_trace(tr);
        return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_sched_switch();
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
        tracing_stop_sched_switch();
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);