1 /* Include in trace.c */
3 #include <linux/kthread.h>
4 #include <linux/delay.h>
/*
 * Return non-zero if @entry carries a recognized trace entry type.
 * Used by trace_test_buffer_cpu() to flag corrupted buffer contents.
 * NOTE(review): body elided in this excerpt — the exact set of accepted
 * entry types is not visible here; confirm against the full source.
 */
6 static inline int trace_valid_entry(struct trace_entry *entry)
/*
 * Drain and validate every pending event on @cpu's ring buffer of @tr.
 * Each consumed entry is checked with trace_valid_entry(); invalid or
 * corrupted entries are reported via printk(KERN_CONT ...) so the
 * message continues the current selftest status line.
 * NOTE(review): several lines are elided in this excerpt (return paths
 * and loop closure not visible).
 */
22 static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
24 struct ring_buffer_event *event;
25 struct trace_entry *entry;
/* Consume events one at a time until the per-cpu buffer is empty. */
27 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
28 entry = ring_buffer_event_data(event);
30 if (!trace_valid_entry(entry)) {
31 printk(KERN_CONT ".. invalid entry %d ",
/* Failure path: an invalid entry means the trace buffer is corrupted. */
41 printk(KERN_CONT ".. corrupted trace buffer .. ");
46 * Test the trace buffer to see if all the elements
/*
 * Validate every cpu's portion of @tr's ring buffer; if @count is
 * non-NULL the total number of entries found is reported through it.
 * Runs with interrupts off and ftrace_max_lock held so the max-latency
 * trace cannot be swapped underneath the check.
 */
49 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
51 unsigned long flags, cnt = 0;
54 /* Don't allow flipping of max traces now */
55 local_irq_save(flags);
56 __raw_spin_lock(&ftrace_max_lock);
/* Snapshot the entry count before consuming anything. */
58 cnt = ring_buffer_entries(tr->buffer);
61 * The trace_test_buffer_cpu runs a while loop to consume all data.
62 * If the calling tracer is broken, and is constantly filling
63 * the buffer, this will run forever, and hard lock the box.
64 * We disable the ring buffer while we do this test to prevent
68 for_each_possible_cpu(cpu) {
69 ret = trace_test_buffer_cpu(tr, cpu);
/* Unlock in reverse order of acquisition. */
74 __raw_spin_unlock(&ftrace_max_lock);
75 local_irq_restore(flags);
/*
 * Log a warning that @trace's init callback failed with @init_ret.
 * Shared by every selftest below so the failure message is uniform.
 */
83 static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
85 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
86 trace->name, init_ret);
88 #ifdef CONFIG_FUNCTION_TRACER
90 #ifdef CONFIG_DYNAMIC_FTRACE
93 #define STR(x) __STR(x)
95 /* Test dynamic code modification and ftrace filters */
/*
 * Selftest for dynamic ftrace: install a filter that matches only the
 * dedicated test function, verify nothing else is traced, then call the
 * test function and verify exactly one entry lands in the buffer.
 * Saves and restores ftrace_enabled/tracer_enabled around the test.
 * NOTE(review): lines elided in this excerpt (tracing enable/disable
 * calls, msleep, and early-return paths are not visible).
 */
96 int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
97 struct trace_array *tr,
100 int save_ftrace_enabled = ftrace_enabled;
101 int save_tracer_enabled = tracer_enabled;
/* "PASSED" closes the previous (plain function tracer) test's line. */
106 /* The ftrace test PASSED */
107 printk(KERN_CONT "PASSED\n");
108 pr_info("Testing dynamic ftrace: ");
110 /* enable tracing, and record the filter function */
114 /* passed in by parameter to fool gcc from optimizing */
118 * Some archs *cough*PowerPC*cough* add characters to the
119 * start of the function names. We simply put a '*' to
122 func_name = "*" STR(DYN_FTRACE_TEST_NAME);
124 /* filter only on our function */
125 ftrace_set_filter(func_name, strlen(func_name), 1);
128 ret = trace->init(tr);
130 warn_failed_init_tracer(trace, ret);
134 /* Sleep for a 1/10 of a second */
137 /* we should have nothing in the buffer */
138 ret = trace_test_buffer(tr, &count);
/* Any entries here mean the filter let unrelated functions through. */
144 printk(KERN_CONT ".. filter did not filter .. ");
148 /* call our function again */
154 /* stop the tracing. */
158 /* check the trace buffer */
159 ret = trace_test_buffer(tr, &count);
163 /* we should only have one item */
164 if (!ret && count != 1) {
165 printk(KERN_CONT ".. filter failed count=%ld ..", count);
/* Restore global tracing state saved on entry. */
171 ftrace_enabled = save_ftrace_enabled;
172 tracer_enabled = save_tracer_enabled;
174 /* Enable tracing on all functions again */
175 ftrace_set_filter(NULL, 0, 1);
180 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
181 #endif /* CONFIG_DYNAMIC_FTRACE */
183 * Simple verification test of ftrace function tracer.
184 * Enable ftrace, sleep 1/10 second, and then read the trace
185 * buffer to see if all is in order.
/*
 * Returns 0 on success; on success (and with dynamic ftrace built in)
 * chains into trace_selftest_startup_dynamic_tracing().
 * NOTE(review): lines elided in this excerpt (tracing start/stop and
 * error paths are not visible).
 */
188 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
190 int save_ftrace_enabled = ftrace_enabled;
191 int save_tracer_enabled = tracer_enabled;
195 /* make sure msleep has been recorded */
198 /* start the tracing */
202 ret = trace->init(tr);
204 warn_failed_init_tracer(trace, ret);
208 /* Sleep for a 1/10 of a second */
210 /* stop the tracing. */
214 /* check the trace buffer */
215 ret = trace_test_buffer(tr, &count);
/* An empty buffer after 100ms of function tracing is a failure. */
219 if (!ret && !count) {
220 printk(KERN_CONT ".. no entries found ..");
224 ret = trace_selftest_startup_dynamic_tracing(trace, tr,
226 DYN_FTRACE_TEST_NAME);
/* Restore global tracing state saved on entry. */
229 ftrace_enabled = save_ftrace_enabled;
230 tracer_enabled = save_tracer_enabled;
232 /* kill ftrace totally if we failed */
238 #endif /* CONFIG_FUNCTION_TRACER */
240 #ifdef CONFIG_IRQSOFF_TRACER
/*
 * Selftest for the irqsoff latency tracer: disable interrupts briefly
 * with tracing active, then verify both the live buffer and the
 * max-latency buffer (max_tr) recorded entries.
 * tracing_max_latency is zeroed for the test and restored on exit.
 * NOTE(review): lines elided in this excerpt.
 */
242 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
244 unsigned long save_max = tracing_max_latency;
248 /* start the tracing */
249 ret = trace->init(tr);
251 warn_failed_init_tracer(trace, ret);
255 /* reset the max latency */
256 tracing_max_latency = 0;
257 /* disable interrupts for a bit */
261 /* stop the tracing. */
263 /* check both trace buffers */
264 ret = trace_test_buffer(tr, NULL);
266 ret = trace_test_buffer(&max_tr, &count);
/* The max-latency buffer must have captured the irqs-off window. */
270 if (!ret && !count) {
271 printk(KERN_CONT ".. no entries found ..");
275 tracing_max_latency = save_max;
279 #endif /* CONFIG_IRQSOFF_TRACER */
281 #ifdef CONFIG_PREEMPT_TRACER
/*
 * Selftest for the preemptoff latency tracer: disable preemption
 * briefly with tracing active, then verify both the live buffer and
 * the max-latency buffer recorded entries.
 * NOTE(review): lines elided in this excerpt.
 */
283 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
285 unsigned long save_max = tracing_max_latency;
290 * Now that the big kernel lock is no longer preemptable,
291 * and this is called with the BKL held, it will always
292 * fail. If preemption is already disabled, simply
293 * pass the test. When the BKL is removed, or becomes
294 * preemptible again, we will once again test this,
/* Cannot measure preempt-off latency while already non-preemptible. */
297 if (preempt_count()) {
298 printk(KERN_CONT "can not test ... force ");
302 /* start the tracing */
303 ret = trace->init(tr);
305 warn_failed_init_tracer(trace, ret);
309 /* reset the max latency */
310 tracing_max_latency = 0;
311 /* disable preemption for a bit */
315 /* stop the tracing. */
317 /* check both trace buffers */
318 ret = trace_test_buffer(tr, NULL);
320 ret = trace_test_buffer(&max_tr, &count);
324 if (!ret && !count) {
325 printk(KERN_CONT ".. no entries found ..");
/* Restore the max latency saved on entry. */
329 tracing_max_latency = save_max;
333 #endif /* CONFIG_PREEMPT_TRACER */
335 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
/*
 * Selftest for the combined preempt+irqsoff tracer.  Runs the critical
 * section twice — once disabling preemption before interrupts, once in
 * the reverse order — and after each pass verifies both the live buffer
 * and the max-latency buffer captured entries.
 * NOTE(review): lines elided in this excerpt (the actual disable/enable
 * calls are not visible).
 */
337 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
339 unsigned long save_max = tracing_max_latency;
344 * Now that the big kernel lock is no longer preemptable,
345 * and this is called with the BKL held, it will always
346 * fail. If preemption is already disabled, simply
347 * pass the test. When the BKL is removed, or becomes
348 * preemptible again, we will once again test this,
/* Cannot measure if preemption is already disabled by the caller. */
351 if (preempt_count()) {
352 printk(KERN_CONT "can not test ... force ");
356 /* start the tracing */
357 ret = trace->init(tr);
359 warn_failed_init_tracer(trace, ret);
363 /* reset the max latency */
364 tracing_max_latency = 0;
366 /* disable preemption and interrupts for a bit */
371 /* reverse the order of preempt vs irqs */
374 /* stop the tracing. */
376 /* check both trace buffers */
377 ret = trace_test_buffer(tr, NULL);
383 ret = trace_test_buffer(&max_tr, &count);
389 if (!ret && !count) {
390 printk(KERN_CONT ".. no entries found ..");
/* Second pass: same test with irqs disabled before preemption. */
396 /* do the test by disabling interrupts first this time */
397 tracing_max_latency = 0;
403 /* reverse the order of preempt vs irqs */
406 /* stop the tracing. */
408 /* check both trace buffers */
409 ret = trace_test_buffer(tr, NULL);
413 ret = trace_test_buffer(&max_tr, &count);
415 if (!ret && !count) {
416 printk(KERN_CONT ".. no entries found ..");
/* Restore the max latency saved on entry. */
424 tracing_max_latency = save_max;
428 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
430 #ifdef CONFIG_NOP_TRACER
/*
 * Selftest for the nop tracer.  The nop tracer does nothing, so there
 * is nothing to verify.
 * NOTE(review): body elided in this excerpt.
 */
432 trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
434 /* What could possibly go wrong? */
439 #ifdef CONFIG_SCHED_TRACER
/*
 * Kthread body for the wakeup-tracer selftest.  Promotes itself to an
 * RT (SCHED_FIFO) priority, signals readiness through the completion
 * passed in @data, then sleeps until the selftest wakes it; finally it
 * idles in short sleeps until kthread_stop() is called.
 * NOTE(review): lines elided in this excerpt (complete() call, schedule()
 * and the loop body are not visible).
 */
440 static int trace_wakeup_test_thread(void *data)
442 /* Make this a RT thread, doesn't need to be too high */
443 struct sched_param param = { .sched_priority = 5 };
444 struct completion *x = data;
/* FIX(review): "¶m" was mojibake of "&param" ("&para;" HTML-entity
 * corruption); restored the address-of expression. */
446 sched_setscheduler(current, SCHED_FIFO, &param);
448 /* Make it know we have a new prio */
451 /* now go to sleep and let the test wake us up */
452 set_current_state(TASK_INTERRUPTIBLE);
455 /* we are awake, now wait to disappear */
456 while (!kthread_should_stop()) {
458 * This is an RT task, do short sleeps to let
/*
 * Selftest for the wakeup latency tracer: spawn an RT kthread, wait for
 * it to reach RT priority and sleep, wake it with tracing active, then
 * verify both the live buffer and the max-latency buffer captured the
 * wakeup latency.  tracing_max_latency is zeroed for the test and
 * restored afterwards, and the helper thread is stopped before return.
 * NOTE(review): lines elided in this excerpt (the wake_up_process()
 * call, kthread_stop() and return paths are not visible).
 */
468 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
470 unsigned long save_max = tracing_max_latency;
471 struct task_struct *p;
472 struct completion isrt;
/* isrt is completed by the thread once it is running at RT prio. */
476 init_completion(&isrt);
478 /* create a high prio thread */
479 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
481 printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
485 /* make sure the thread is running at an RT prio */
486 wait_for_completion(&isrt);
488 /* start the tracing */
489 ret = trace->init(tr);
491 warn_failed_init_tracer(trace, ret);
495 /* reset the max latency */
496 tracing_max_latency = 0;
498 /* sleep to let the RT thread sleep too */
502 * Yes this is slightly racy. It is possible that for some
503 * strange reason that the RT thread we created, did not
504 * call schedule for 100ms after doing the completion,
505 * and we do a wakeup on a task that already is awake.
506 * But that is extremely unlikely, and the worst thing that
507 * happens in such a case, is that we disable tracing.
508 * Honestly, if this race does happen something is horribly
509 * wrong with the system.
514 /* give a little time to let the thread wake up */
517 /* stop the tracing. */
519 /* check both trace buffers */
520 ret = trace_test_buffer(tr, NULL);
522 ret = trace_test_buffer(&max_tr, &count);
/* Restore the max latency saved on entry before reporting results. */
528 tracing_max_latency = save_max;
530 /* kill the thread */
533 if (!ret && !count) {
534 printk(KERN_CONT ".. no entries found ..");
540 #endif /* CONFIG_SCHED_TRACER */
542 #ifdef CONFIG_CONTEXT_SWITCH_TRACER
/*
 * Selftest for the context-switch tracer: enable it, sleep 100ms so
 * scheduling activity is recorded, then verify the buffer is non-empty.
 * NOTE(review): lines elided in this excerpt.
 */
544 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
549 /* start the tracing */
550 ret = trace->init(tr);
552 warn_failed_init_tracer(trace, ret);
556 /* Sleep for a 1/10 of a second */
558 /* stop the tracing. */
560 /* check the trace buffer */
561 ret = trace_test_buffer(tr, &count);
/* 100ms of sleeping must produce at least one context switch. */
565 if (!ret && !count) {
566 printk(KERN_CONT ".. no entries found ..");
572 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
574 #ifdef CONFIG_SYSPROF_TRACER
/*
 * Selftest for the sysprof tracer: enable it, sleep 100ms, then
 * validate the trace buffer contents with trace_test_buffer().
 * NOTE(review): lines elided in this excerpt (unlike the other tests,
 * no non-empty check is visible here; confirm against the full source).
 */
576 trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
581 /* start the tracing */
582 ret = trace->init(tr);
584 warn_failed_init_tracer(trace, ret);
588 /* Sleep for a 1/10 of a second */
590 /* stop the tracing. */
592 /* check the trace buffer */
593 ret = trace_test_buffer(tr, &count);
599 #endif /* CONFIG_SYSPROF_TRACER */
601 #ifdef CONFIG_BRANCH_TRACER
/*
 * Selftest for the branch tracer: enable it, sleep 100ms, then
 * validate the trace buffer contents with trace_test_buffer().
 * NOTE(review): lines elided in this excerpt (return path not visible).
 */
603 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
608 /* start the tracing */
609 ret = trace->init(tr);
611 warn_failed_init_tracer(trace, ret);
615 /* Sleep for a 1/10 of a second */
617 /* stop the tracing. */
619 /* check the trace buffer */
620 ret = trace_test_buffer(tr, &count);
626 #endif /* CONFIG_BRANCH_TRACER */