/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
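
/*
 * The two probes below share a pattern: with interrupts off, bump the
 * per-CPU "disabled" counter and record the event only if we are the
 * sole writer (counter == 1). The counter keeps a reentrant hit on
 * the same CPU (e.g. from an NMI, or the tracer tracing itself) from
 * corrupting that CPU's trace buffer.
 */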
static void notrace
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(prev);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void notrace
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
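
/*
 * Entry points called from the scheduler's context-switch and wakeup
 * paths. Each records the event for this tracer, then unconditionally
 * chains to the wakeup-latency tracer, whose hooks are no-ops while
 * that tracer is inactive.
 */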
void notrace
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}

void notrace
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
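
/*
 * ftrace_special() is an ad-hoc debugging aid: call it from code under
 * study and the three arbitrary values show up as a "special" entry in
 * the trace, guarded by the same disabled-counter scheme as the probes
 * above.
 */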
void notrace
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
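
/*
 * Stamp a fresh start time and clear every online CPU's buffer so a
 * new trace starts out empty.
 */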
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
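
/*
 * The tracer definition handed to the ftrace core: init/reset run when
 * the tracer is selected or dropped, ctrl_update when tracing is
 * toggled through tr->ctrl.
 */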
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
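
/*
 * Example usage, assuming debugfs is mounted at /sys/kernel/debug (the
 * mount point and control files vary across kernel versions of this
 * era):
 *
 *	echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *	echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *	cat /sys/kernel/debug/tracing/trace
 */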