/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
static inline int trace_valid_entry(struct trace_entry *entry)
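
/*
 * Consume every event still queued on one CPU's ring buffer and check
 * that each one is a recognized trace entry type.
 */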
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer is the size of trace_buf_size; if we
                 * loop more times than that, something is wrong with
                 * the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");

                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",

        printk(KERN_CONT ".. corrupted trace buffer .. ");
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
        unsigned long flags, cnt = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lockup.
         */
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);

        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
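
/*
 * For context: each trace_selftest_startup_*() routine below is run by
 * register_tracer() when CONFIG_FTRACE_STARTUP_TEST is enabled, via the
 * tracer's ->selftest callback. A minimal sketch of the hookup, with
 * illustrative field values modeled on the function tracer:
 *
 *      static struct tracer function_trace __read_mostly = {
 *              .name           = "function",
 *              .init           = function_trace_init,
 *              .reset          = function_trace_reset,
 *      #ifdef CONFIG_FTRACE_STARTUP_TEST
 *              .selftest       = trace_selftest_startup_function,
 *      #endif
 *      };
 *
 * register_tracer() then logs "Testing tracer <name>: " followed by
 * "PASSED" or a failure message for each registered tracer at boot.
 */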
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */

        /* passed in by parameter to keep gcc from optimizing it away */

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
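
        /*
         * Illustration (an assumption, not taken from this file): with
         * DYN_FTRACE_TEST_NAME defined as trace_selftest_dynamic_test_func,
         * func_name becomes the filter pattern
         * "*trace_selftest_dynamic_test_func".
         */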
        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);

                printk(KERN_CONT ".. filter did not filter .. ");

        /* call our function again */

        /* stop the tracing. */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);

        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;

        /* make sure msleep has been recorded */

        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* stop the tracing. */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */

#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one stems.
 */
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        /* Don't test dynamic tracing, the function tracer already did */

        /* Stop it if we failed */

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_IRQSOFF_TRACER

trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
        unsigned long save_max = tracing_max_latency;

        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */

        /*
         * Stop the tracer to avoid a warning subsequent to a buffer
         * flipping failure: tracing_stop() disables the tr and max
         * buffers, making flipping impossible in case of parallel
         * max irqs off latencies.
         */

        /* stop the tracing. */

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        tracing_max_latency = save_max;

#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER

trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
        unsigned long save_max = tracing_max_latency;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");

        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */

        /*
         * Stop the tracer to avoid a warning subsequent to a buffer
         * flipping failure: tracing_stop() disables the tr and max
         * buffers, making flipping impossible in case of parallel
         * max preempt off latencies.
         */

        /* stop the tracing. */

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        tracing_max_latency = save_max;

#endif /* CONFIG_PREEMPT_TRACER */
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)

trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
        unsigned long save_max = tracing_max_latency;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");

        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */

        /* reverse the order of preempt vs irqs */

        /*
         * Stop the tracer to avoid a warning subsequent to a buffer
         * flipping failure: tracing_stop() disables the tr and max
         * buffers, making flipping impossible in case of parallel
         * max irqs/preempt off latencies.
         */

        /* stop the tracing. */

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;

        /* reverse the order of preempt vs irqs */

        /* stop the tracing. */

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

        tracing_max_latency = save_max;

#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_NOP_TRACER

trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
        /* What could possibly go wrong? */
#ifdef CONFIG_SCHED_TRACER
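
/*
 * Helper thread for the wakeup tracer selftest: it raises itself to an
 * RT (SCHED_FIFO) priority, signals the completion so the test knows it
 * is ready, then sleeps until the test wakes it up and finally waits to
 * be stopped.
 */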
static int trace_wakeup_test_thread(void *data)
        /* Make this an RT thread; its prio doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* let the waiting test know we are running at the new prio */

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * other tasks run.
                 */
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */

        /*
         * Yes this is slightly racy. It is possible that for some
         * strange reason the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that already is awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case, is that we disable tracing. Honestly, if
         * this race does happen something is horribly wrong with
         * the system.
         */

        /* give a little time to let the thread wake up */

        /* stop the tracing. */

        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        ret = trace_test_buffer(&max_tr, &count);

        tracing_max_latency = save_max;

        /* kill the thread */

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER

trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* stop the tracing. */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_SYSPROF_TRACER

trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* stop the tracing. */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_SYSPROF_TRACER */
#ifdef CONFIG_BRANCH_TRACER

trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
        /* start the tracing */
        ret = tracer_init(trace, tr);
                warn_failed_init_tracer(trace, ret);

        /* Sleep for 1/10 of a second */

        /* stop the tracing. */

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");

#endif /* CONFIG_BRANCH_TRACER */