/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;
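
/*
 * wakeup_lock serializes updates to wakeup_task, wakeup_cpu and
 * wakeup_prio.  It is a raw spinlock so that taking it avoids the
 * lockdep and spinlock-debug paths, which could otherwise recurse
 * back into the tracer from within the probes below.
 */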
static raw_spinlock_t wakeup_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static void __wakeup_reset(struct trace_array *tr);
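
/*
 * Note: ftrace_preempt_disable()/ftrace_preempt_enable() (from trace.h)
 * disable preemption around the function hook while remembering whether
 * NEED_RESCHED was already set, so that re-enabling preemption does not
 * call into schedule() -- the hook may fire from inside the scheduler
 * with the runqueue lock held.
 */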
#ifdef CONFIG_FUNCTION_TRACER
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
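
/*
 * Example: with tracing_thresh unset (0) and tracing_max_latency at
 * 500000 ns, a delta of 600000 ns is reported as a new maximum while
 * 400000 ns is ignored.  With tracing_thresh set, every delta at or
 * above the threshold is reported instead.
 */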
static void notrace
probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 * (Pairs with the smp_wmb() in start_wakeup_tracer().)
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(wakeup_trace);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
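
/*
 * __wakeup_reset() must be called with wakeup_lock held (and, since the
 * lock is a raw spinlock, with interrupts disabled); wakeup_reset()
 * below is the wrapper that provides that.
 */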
static void __wakeup_reset(struct trace_array *tr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		tracing_reset(tr, cpu);

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
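
/*
 * probe_wakeup() runs off the sched_wakeup/sched_wakeup_new tracepoints.
 * When a realtime task is woken with a higher priority (lower ->prio
 * value) than both the current task and any wakeup already being traced,
 * it arms a new measurement: the trace is reset, the woken task is
 * pinned with get_task_struct(), and preempt_timestamp records T0.
 * probe_wakeup_sched_switch() above supplies T1 when the task is
 * finally scheduled in.
 */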
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it when we open a trace output file.
 */
static int save_tracer_enabled;
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 * (Pairs with the smp_rmb() in probe_wakeup_sched_switch().)
	 */
	smp_wmb();

	register_ftrace_function(&trace_ops);

	if (tracing_is_enabled()) {
		tracer_enabled = 1;
		save_tracer_enabled = 1;
	} else {
		tracer_enabled = 0;
		save_tracer_enabled = 0;
	}

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup);
}
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	unregister_trace_sched_switch(probe_wakeup_sched_switch);
	unregister_trace_sched_wakeup_new(probe_wakeup);
	unregister_trace_sched_wakeup(probe_wakeup);
}
static int wakeup_tracer_init(struct trace_array *tr)
{
	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}
static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);
}
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
	save_tracer_enabled = 1;
}
static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
	save_tracer_enabled = 0;
}
static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	tracer_enabled = 0;
}
static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (save_tracer_enabled) {
		wakeup_reset(iter->tr);
		tracer_enabled = 1;
	}
}
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);
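
/*
 * Example usage from userspace, as a sketch -- the debugfs mount point
 * is an assumption here (the tracing directory may also live elsewhere,
 * e.g. /debug/tracing):
 *
 *	# echo 0      > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo wakeup > /sys/kernel/debug/tracing/current_tracer
 *	# echo 1      > /sys/kernel/debug/tracing/tracing_enabled
 *	  ... run a realtime workload, e.g. cyclictest ...
 *	# echo 0      > /sys/kernel/debug/tracing/tracing_enabled
 *	# cat /sys/kernel/debug/tracing/latency_trace
 *
 * tracing_max_latency then reports the worst-case wakeup latency seen,
 * in microseconds.
 */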