/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/marker.h>

#include "trace.h"
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

static DEFINE_SPINLOCK(wakeup_lock);

static void __wakeup_reset(struct trace_array *tr);
#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;

	if (likely(!wakeup_task))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, data, ip, parent_ip, flags);

 unlock:
	spin_unlock_irqrestore(&wakeup_lock, flags);

 out:
	atomic_dec(&data->disabled);

	/*
	 * To prevent recursion from the scheduler, if the
	 * resched flag was set before we entered, then
	 * don't reschedule.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FTRACE */
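/*
 * While a trace is in progress, wakeup_tracer_call() is entered from
 * the ftrace hook on every traced kernel function; it returns almost
 * immediately unless it runs on the CPU the wakeup task occupies, so
 * function entries are recorded only along the path being timed.
 */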
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
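/*
 * Example: with the default tracing_thresh of 0, only a delta larger
 * than the current tracing_max_latency is recorded, so each report is
 * a new worst case. With a nonzero tracing_thresh, every delta at or
 * above the threshold is recorded, regardless of the maximum so far.
 */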
static void notrace
wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
	struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();
	if (next != wakeup_task)
		return;

	/* The task we are waiting for is waking up */
	data = tr->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;
	spin_lock_irqsave(&wakeup_lock, flags);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);
	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(tr, wakeup_task, wakeup_cpu);

out_unlock:
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
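/*
 * The window measured above starts at data->preempt_timestamp, stamped
 * in wakeup_check_start() when the task was woken, and ends here when
 * the scheduler switches the task in. delta stays in nanoseconds until
 * a report is certain, because nsecs_to_usecs() is comparatively slow.
 */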
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	wakeup_sched_switch(probe_data, __rq, prev, next);
}
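/*
 * The va_arg sequence above follows the format string registered for
 * the kernel_sched_schedule marker in start_wakeup_tracer():
 *
 *	"prev_pid %d next_pid %d prev_state %ld ## rq %p prev %p next %p"
 *
 * The first three values are consumed and discarded; only the pointer
 * arguments after the "##" separator are used.
 */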
static void __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	assert_spin_locked(&wakeup_lock);

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(data);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
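/*
 * The put_task_struct() above pairs with the get_task_struct() taken
 * in wakeup_check_start(), which keeps the traced task's task_struct
 * from being freed for as long as a trace is in flight.
 */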
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_lock, flags);
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
}
static void
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
		   struct task_struct *curr)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= curr->prio)
		return;

	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(tr);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, tr->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	spin_unlock(&wakeup_lock);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
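/*
 * Kernel priority values are inverted: a numerically smaller p->prio
 * means a higher priority. A wakeup is acted on only when p is an RT
 * task that outranks both the previously recorded wakeup (wakeup_prio)
 * and the task currently running on this CPU, so the tracer always
 * follows the highest-priority RT wakeup seen so far.
 */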
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct trace_array **ptr = probe_data;
	struct trace_array *tr = *ptr;
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_check_start(tr, task, curr);
}
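/*
 * Both comms are recorded so that the eventual trace output can
 * resolve the waker's and wakee's pids to task names.
 */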
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&wakeup_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&wakeup_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		sched_switch_callback,
		&wakeup_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}
	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	tracer_enabled = 1;
	register_ftrace_function(&trace_ops);

	return;
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&wakeup_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&wakeup_trace);
}
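/*
 * The error unwind mirrors registration order: a failure on the third
 * probe falls through both labels and removes the first two, while a
 * failure on the second removes only the first.
 */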
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&wakeup_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&wakeup_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&wakeup_trace);
}
static void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}
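/*
 * tr->ctrl tracks the global tracing on/off control in debugfs:
 * ctrl_update is invoked when it flips, and open/close pause and
 * resume the tracer around a userspace dump so the snapshot is not
 * overwritten while it is being read.
 */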
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
};
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);
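/*
 * Typical usage, assuming debugfs is mounted at /debug as in the
 * ftrace documentation of this era (mount points may differ):
 *
 *	echo wakeup > /debug/tracing/current_tracer
 *	echo 0 > /debug/tracing/tracing_max_latency
 *	cat /debug/tracing/latency_trace
 *
 * This selects the tracer, resets the recorded maximum, and later
 * dumps the worst RT wakeup latency observed since the reset.
 */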