/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * all callers stop being traced.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}

static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
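/*
 * Worked example (illustrative only; the real numbers depend on the
 * architecture and config): with 4K pages, and assuming
 * sizeof(struct ftrace_page) == 16 (next pointer plus index on 64-bit)
 * and sizeof(struct dyn_ftrace) == 32 (ip, flags and the list node),
 * ENTRIES_PER_PAGE evaluates to (4096 - 16) / 32 = 127 records per page.
 */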
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
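/*
 * Example usage (sketch): because the macro pair expands to two nested
 * for loops, a 'break' only leaves the inner loop; an early exit must
 * jump past while_for_each_ftrace_rec() with a goto:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)	// target_ip: caller supplied
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */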
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}
static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}
static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_printf(m, "\n");

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	if (*pos > 0) {
		if (iter->idx < 0)
			return p;
		(*pos)--;
		iter->idx--;
	}

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}

static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
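/*
 * Example of the four outcomes (sketch; buff is modified in place):
 *
 *	"func"	 -> MATCH_FULL,        search = "func"
 *	"func*"	 -> MATCH_FRONT_ONLY,  search = "func" ('*' replaced by 0)
 *	"*func"	 -> MATCH_END_ONLY,    search = "func"
 *	"*func*" -> MATCH_MIDDLE_ONLY, search = "func"
 *
 * A leading '!' sets *not to 1 and is skipped before parsing.
 */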
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
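/*
 * Example (hypothetical sketch, not part of this file): another command
 * can be registered the same way. With the sketch below,
 * "echo '*lock*:mycmd:on' > set_ftrace_filter" would invoke
 * my_cmd_callback() with func = "*lock*", cmd = "mycmd", param = "on".
 * The names my_cmd_callback/my_cmd are illustrative only:
 *
 *	static int my_cmd_callback(char *func, char *cmd, char *param,
 *				   int enable)
 *	{
 *		// act on the functions matching 'func'
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	ret = register_ftrace_command(&my_cmd);
 */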
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);

	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			      void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}

enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};
static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				  void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else if (glob) {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
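/*
 * Example (hypothetical sketch): attaching a probe to every function
 * matching a glob. The names my_probe_func/my_probe_ops are illustrative:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// runs from the function tracer; must be notrace-safe
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	...
 *	unregister_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * register_ftrace_function_probe() returns the number of functions
 * hooked, or a negative errno.
 */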
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
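/*
 * Example (sketch; casts between char and unsigned char omitted for
 * brevity): a built-in tracer might do
 *
 *	ftrace_set_filter("ext3_*", strlen("ext3_*"), 1);
 *	ftrace_set_notrace("*_readpage", strlen("*_readpage"), 0);
 *
 * i.e. reset and trace everything matching "ext3_*", then additionally
 * suppress any of those functions that match "*_readpage".
 */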
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}

static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode the glob expression */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			goto out;	/* no 'break' out of the double loop */

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		*ppos += read;
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
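/*
 * Example user-space usage of these files (sketch, assuming debugfs is
 * mounted at /sys/kernel/debug and d_tracer is the tracing directory):
 *
 *	# grep sys_open /sys/kernel/debug/tracing/available_filter_functions
 *	# echo 'sys_open' >  /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo 'sched_*' >> /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo '*lock*'  >  /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Opening with '>>' (O_APPEND) skips the reset in ftrace_regex_open(),
 * so patterns accumulate instead of replacing the previous filter.
 */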
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
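/*
 * Example write semantics for set_ftrace_pid (sketch, following the
 * val < 0 / val == 0 / val > 0 branches above):
 *
 *	echo 123 > set_ftrace_pid	- trace only the threads of pid 123
 *	echo 0   > set_ftrace_pid	- trace only the per-cpu idle tasks
 *	echo -1  > set_ftrace_pid	- disable pid filtering again
 */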
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled permanently and
 * the trace function is pointed back at the stub.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
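/*
 * Example (hypothetical sketch): minimal use of the registration API.
 * The names my_trace_func/my_ops are illustrative only:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// ip is the traced function, parent_ip its caller
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	ret = unregister_ftrace_function(&my_ops);
 */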
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
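/*
 * Example (hypothetical sketch): hooking function entry and return.
 * The names my_ent/my_ret are illustrative only:
 *
 *	static int my_ent(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: also trace this function's return
 *	}
 *
 *	static void my_ret(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime = time spent in the func
 *	}
 *
 *	ret = register_ftrace_graph(my_ret, my_ent);
 *	...
 *	unregister_ftrace_graph();
 */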
void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */