#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_CONT,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BTS,

	__TRACE_LAST_TYPE
};
/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
	unsigned char		type;
	unsigned char		cpu;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			tgid;
};
/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};
/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ent		graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry		ent;
	struct ftrace_graph_ret		ret;
};

extern struct tracer boot_tracer;
/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};
/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};
/*
 * Stack-trace entry:
 */
#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};
/*
 * ftrace_printk entry:
 */
struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};
#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};
struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};
#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct bts_entry {
	struct trace_entry	ent;
	unsigned long		from;
	unsigned long		to;
};
/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 *  CONT		- multiple entries hold the trace item
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_CONT			= 0x20,
};
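
/*
 * Usage sketch (illustrative, not part of the original header):
 * output code tests these bits against trace_entry.flags when it
 * builds the latency-format columns, e.g.:
 *
 *	int hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 *	int softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
 *	char irqs_off = (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
 *		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.';
 */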

#define TRACE_BUF_SIZE		1024
/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data: (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};
struct trace_iterator;
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};
#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}
/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);	\
		__ftrace_bad_type();					\
	} while (0)
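
/*
 * Usage sketch (illustrative, not part of the original header): a
 * print_line callback verifies-and-casts the current entry, e.g.
 * (my_print_line is a hypothetical name):
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		struct ftrace_entry *field;
 *
 *		if (iter->ent->type != TRACE_FN)
 *			return TRACE_TYPE_UNHANDLED;
 *
 *		trace_assign_type(field, iter->ent);
 *		seq_print_ip_sym(&iter->seq, field->ip, 0);
 *		trace_seq_printf(&iter->seq, "\n");
 *		return TRACE_TYPE_HANDLED;
 *	}
 *
 * The WARN_ON inside IF_ASSIGN fires if the entry's type id does not
 * match the struct the variable points to.
 */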

/* Return values for print_line callback */
enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE	= 0,	/* Retry after flushing the seq */
	TRACE_TYPE_HANDLED	= 1,
	TRACE_TYPE_UNHANDLED	= 2	/* Relay to other output functions */
};
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};
/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};
/* Makes it easier to define a tracer option */
#define TRACER_OPT(s, b)	.name = #s, .bit = b
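
/*
 * Definition sketch (illustrative; all names below are hypothetical):
 *
 *	#define MY_OPT_VERBOSE	0x1
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(verbose, MY_OPT_VERBOSE) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 * The empty initializer terminates the list. The tracer points its
 * struct tracer ->flags at my_flags and handles toggles in its
 * set_flag callback.
 */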
/*
 * A specific tracer, represented by methods that operate on a trace array:
 */
struct tracer {
	const char		*name;
	/* Your tracer should raise a warning if init fails */
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
};
struct trace_seq {
	unsigned char		buffer[PAGE_SIZE];
	unsigned int		len;
	unsigned int		readpos;
};
/*
 * Trace iterator - used by printout routines who present trace
 * results to users and which routines might sleep, etc:
 */
struct trace_iterator {
	struct trace_array	*tr;
	struct tracer		*trace;
	void			*private;
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* The below is zeroed out in pipe_read */
	struct trace_seq	seq;
	struct trace_entry	*ent;
	int			cpu;
	u64			ts;

	unsigned long		iter_flags;
	loff_t			pos;
	long			idx;
};
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);
void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);
void tracing_record_cmdline(struct task_struct *tsk);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    struct trace_array_cpu *data,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
void trace_graph_entry(struct ftrace_graph_ent *trace);
void trace_bts(struct trace_array *tr,
	       unsigned long from,
	       unsigned long to);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
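
/*
 * Registration sketch (illustrative; my_tracer and friends are
 * hypothetical names): the common pattern is a statically defined
 * struct tracer registered from an initcall:
 *
 *	static int my_tracer_init(struct trace_array *tr)
 *	{
 *		int cpu;
 *
 *		for_each_online_cpu(cpu)
 *			tracing_reset(tr, cpu);
 *		tracing_start_cmdline_record();
 *		return 0;
 *	}
 *
 *	static void my_tracer_reset(struct trace_array *tr)
 *	{
 *		tracing_stop_cmdline_record();
 *	}
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(init_my_tracer);
 */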

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

extern cycle_t ftrace_now(int cpu);
#ifdef CONFIG_FUNCTION_TRACER
void tracing_start_function_trace(void);
void tracing_stop_function_trace(void);
#else
# define tracing_start_function_trace()	do { } while (0)
# define tracing_stop_function_trace()	do { } while (0)
#endif
#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif
#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */
extern void *head_page(struct trace_array_cpu *data);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern void trace_seq_print_cont(struct trace_seq *s,
				 struct trace_iterator *iter);

extern int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
		 unsigned long sym_flags);
extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
				 size_t cnt);
extern long ns2usecs(cycle_t nsec);
extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif
/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000
};
/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
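
/*
 * Usage sketch (illustrative): output routines typically extract the
 * symbol-related bits once and pass them to the symbol printer:
 *
 *	unsigned long sym_flags = trace_flags & TRACE_ITER_SYM_MASK;
 *
 *	seq_print_ip_sym(&iter->seq, ip, sym_flags);
 *
 * so a single printer honors the print-parent, sym-offset and
 * sym-addr options together.
 */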

extern struct tracer nop_trace;
/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there exist
 * cases that the tracing might happen before the need_resched
 * flag is checked. If this happens and the tracer calls
 * preempt_enable (after a disable), a schedule might take place
 * causing an infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag, if it is set, then we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}
/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The matching disable call saved the state
 * of preemption. If resched is set, then we were either inside an
 * atomic or are inside the scheduler (we would have already scheduled
 * otherwise). In this case, we do not want to call normal
 * preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
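
/*
 * Pairing sketch (illustrative; my_trace_callback is a hypothetical
 * name): a callback that may fire inside the scheduler brackets its
 * work with the two helpers:
 *
 *	static void my_trace_callback(unsigned long ip, unsigned long pip)
 *	{
 *		int resched;
 *
 *		resched = ftrace_preempt_disable();
 *
 *		...record the trace entry with preemption disabled...
 *
 *		ftrace_preempt_enable(resched);
 *	}
 */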
#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

#endif /* _LINUX_KERNEL_TRACE_H */