tracing/ftrace: remove unused code in sched_switch tracer
[linux-2.6] / kernel / trace / trace_sched_switch.c
1 /*
2  * trace context switch
3  *
4  * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
5  *
6  */
7 #include <linux/module.h>
8 #include <linux/fs.h>
9 #include <linux/debugfs.h>
10 #include <linux/kallsyms.h>
11 #include <linux/uaccess.h>
12 #include <linux/ftrace.h>
13 #include <trace/sched.h>
14
15 #include "trace.h"
16
17 static struct trace_array       *ctx_trace;
18 static int __read_mostly        tracer_enabled;
19 static atomic_t                 sched_ref;
20 static DEFINE_MUTEX(tracepoint_mutex);
21
22 static void
23 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
24                         struct task_struct *next)
25 {
26         struct trace_array_cpu *data;
27         unsigned long flags;
28         int cpu;
29         int pc;
30
31         if (!atomic_read(&sched_ref))
32                 return;
33
34         tracing_record_cmdline(prev);
35         tracing_record_cmdline(next);
36
37         if (!tracer_enabled)
38                 return;
39
40         pc = preempt_count();
41         local_irq_save(flags);
42         cpu = raw_smp_processor_id();
43         data = ctx_trace->data[cpu];
44
45         if (likely(!atomic_read(&data->disabled)))
46                 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
47
48         local_irq_restore(flags);
49 }
50
51 static void
52 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
53 {
54         struct trace_array_cpu *data;
55         unsigned long flags;
56         int cpu, pc;
57
58         if (!likely(tracer_enabled))
59                 return;
60
61         pc = preempt_count();
62         tracing_record_cmdline(current);
63
64         local_irq_save(flags);
65         cpu = raw_smp_processor_id();
66         data = ctx_trace->data[cpu];
67
68         if (likely(!atomic_read(&data->disabled)))
69                 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
70                                            flags, pc);
71
72         local_irq_restore(flags);
73 }
74
75 static void sched_switch_reset(struct trace_array *tr)
76 {
77         int cpu;
78
79         tr->time_start = ftrace_now(tr->cpu);
80
81         for_each_online_cpu(cpu)
82                 tracing_reset(tr, cpu);
83 }
84
85 static int tracing_sched_register(void)
86 {
87         int ret;
88
89         ret = register_trace_sched_wakeup(probe_sched_wakeup);
90         if (ret) {
91                 pr_info("wakeup trace: Couldn't activate tracepoint"
92                         " probe to kernel_sched_wakeup\n");
93                 return ret;
94         }
95
96         ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
97         if (ret) {
98                 pr_info("wakeup trace: Couldn't activate tracepoint"
99                         " probe to kernel_sched_wakeup_new\n");
100                 goto fail_deprobe;
101         }
102
103         ret = register_trace_sched_switch(probe_sched_switch);
104         if (ret) {
105                 pr_info("sched trace: Couldn't activate tracepoint"
106                         " probe to kernel_sched_schedule\n");
107                 goto fail_deprobe_wake_new;
108         }
109
110         return ret;
111 fail_deprobe_wake_new:
112         unregister_trace_sched_wakeup_new(probe_sched_wakeup);
113 fail_deprobe:
114         unregister_trace_sched_wakeup(probe_sched_wakeup);
115         return ret;
116 }
117
/*
 * Detach all three probes, in reverse order of registration in
 * tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
124
125 static void tracing_start_sched_switch(void)
126 {
127         long ref;
128
129         mutex_lock(&tracepoint_mutex);
130         ref = atomic_inc_return(&sched_ref);
131         if (ref == 1)
132                 tracing_sched_register();
133         mutex_unlock(&tracepoint_mutex);
134 }
135
136 static void tracing_stop_sched_switch(void)
137 {
138         long ref;
139
140         mutex_lock(&tracepoint_mutex);
141         ref = atomic_dec_and_test(&sched_ref);
142         if (ref)
143                 tracing_sched_unregister();
144         mutex_unlock(&tracepoint_mutex);
145 }
146
/*
 * Public entry point: start pid->comm recording by taking a sched
 * tracepoint reference (the probes call tracing_record_cmdline()).
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
151
/*
 * Public entry point: drop the reference taken by
 * tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
156
static void start_sched_trace(struct trace_array *tr)
{
	/* Start from an empty buffer ... */
	sched_switch_reset(tr);
	/* ... hook the tracepoints ... */
	tracing_start_cmdline_record();
	/* ... and only then let the probes log events. */
	tracer_enabled = 1;
}
163
static void stop_sched_trace(struct trace_array *tr)
{
	/* Stop logging first, then unhook — mirror of start_sched_trace(). */
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}
169
static void sched_switch_trace_init(struct trace_array *tr)
{
	/* Remember which trace_array the probes should log into. */
	ctx_trace = tr;

	/* ctrl set means the tracer is being enabled right away. */
	if (tr->ctrl)
		start_sched_trace(tr);
}
177
static void sched_switch_trace_reset(struct trace_array *tr)
{
	/* Only tear down if the tracer was actually running. */
	if (tr->ctrl)
		stop_sched_trace(tr);
}
183
/* Callback for the tracer's on/off control toggle. */
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
192
/* The "sched_switch" tracer, registered with the ftrace core below. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
203
204 __init static int init_sched_switch_trace(void)
205 {
206         return register_tracer(&sched_switch_trace);
207 }
208 device_initcall(init_sched_switch_trace);