/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}
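/*
 * Walk one CPU's buffer and verify that every populated element is a
 * known entry type.  Note this file is #included from trace.c (see the
 * comment at the top), which is how it can use static helpers such as
 * head_page() and the ENTRIES_PER_PAGE constant defined there.
 */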
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {
		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}
/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE
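/*
 * Note: the trace_selftest_startup_*() functions below are run from
 * register_tracer() at boot when CONFIG_FTRACE_STARTUP_TEST is set,
 * so a tracer that fails its selftest is never made available.
 */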
#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)

static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}
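/*
 * The dummy function above exists only to provide a known mcount call
 * site to filter on; STR(DYN_FTRACE_TEST_NAME) stringifies its
 * (expanded) name so it can be passed to ftrace_set_filter() below.
 */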
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to fool gcc from optimizing */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);
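	/*
	 * With the filter installed, only DYN_FTRACE_TEST_NAME may reach
	 * the buffer: it must stay empty across the msleep() below, then
	 * gain exactly one entry once func() is called again.
	 */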
	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
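/*
 * The latency tracer tests below share one pattern: start the tracer,
 * open a critical section (irqs and/or preemption off) long enough for
 * udelay(100) to become the new maximum latency, stop the tracer, then
 * verify that max_tr, the snapshot buffer holding the trace of the
 * largest latency seen, is sane and non-empty.
 */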
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
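/*
 * The combined test runs the critical section twice, reversing which
 * of irqs/preemption is disabled first, so both ways of entering and
 * leaving an overlapping preempt-off/irqs-off region are exercised.
 */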
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	local_irq_disable();
	preempt_disable();
	udelay(100);
	local_irq_enable();
	/* reverse the order of preempt vs irqs */
	preempt_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this a RT thread, doesn't need to be too high */
	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Make it know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}
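/*
 * Measure wakeup latency: with the RT thread above asleep, wake it and
 * let the wakeup tracer record the time from wake_up_process() until
 * the thread is scheduled in; the largest such latency is snapshotted
 * into max_tr, which is checked below.
 */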
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen something is horribly wrong with the system.
	 */
	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */