/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>
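/*
 * For reference: these selftests are invoked from register_tracer() via
 * each tracer's ->selftest callback when CONFIG_FTRACE_STARTUP_TEST is
 * enabled, so a broken tracer is caught at boot before it can be used.
 */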
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_CONT:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
                return 1;
        }
        return 0;
}
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
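                /*
                 * Note: ring_buffer_consume() is a consuming read; each
                 * event is removed from the ring buffer as it is checked,
                 * so a test pass drains the buffer.
                 */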
                entry = ring_buffer_event_data(event);

                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                               entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
#define __STR(x) #x
#define STR(x) __STR(x)
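/*
 * Two macro levels are needed so that STR() stringifies the *expansion*
 * of its argument: STR(DYN_FTRACE_TEST_NAME) first expands the macro to
 * the real function name, and only then does #x turn that name into a
 * string literal.
 */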
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" STR(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
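/*
 * For reference: DYN_FTRACE_TEST_NAME is a stub whose only job is to be
 * traced. It lives in a separately compiled object that keeps its -pg
 * instrumentation (the tracer's own objects are built without -pg), so
 * calling it really does hit the mcount hook. A minimal sketch:
 *
 *        int DYN_FTRACE_TEST_NAME(void)
 *        {
 *                return 0;
 *        }
 */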
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
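        /*
         * max_tr is the snapshot buffer that the latency tracers copy the
         * maximum-latency trace into; checking it as well verifies both
         * the live buffer and the saved maximum.
         */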
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "cannot test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /* stop the tracing. */
        tracing_stop();

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "cannot test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret) {
                tracing_start();
                goto out;
        }

        ret = trace_test_buffer(&max_tr, &count);
        if (ret) {
                tracing_start();
                goto out;
        }

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                tracing_start();
                goto out;
        }
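        /*
         * Second pass: repeat the measurement with the two critical
         * sections entered in the opposite order, to check that the
         * combined preempt/irqs-off tracer records a latency either way.
         */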
        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        local_irq_disable();
        preempt_disable();
        udelay(100);
        local_irq_enable();
        /* reverse the order of preempt vs irqs */
        preempt_enable();

        /* stop the tracing. */
        tracing_stop();

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        trace->reset(tr);
        tracing_start();
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif /* CONFIG_NOP_TRACER */
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread; the priority doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* let the test know we have our new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}
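/*
 * Wakeup-latency test: wake the RT thread created above while tracing is
 * active; the wakeup tracer should record how long the highest-priority
 * task had to wait before it actually ran.
 */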
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes, this is slightly racy. It is possible that, for some
         * strange reason, the RT thread we created did not call
         * schedule for 100ms after doing the completion, in which
         * case we would wake a task that is already awake. But that
         * is extremely unlikely, and the worst that can happen is
         * that we disable tracing. Honestly, if this race does
         * happen, something is horribly wrong with the system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = trace->init(tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
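/*
 * With CONFIG_FTRACE_STARTUP_TEST enabled, each of these selftests runs
 * when its tracer is registered at boot, and the result appears in the
 * kernel log, e.g.:
 *
 *        Testing tracer branch: PASSED
 */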