/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
47 /* ftrace_enabled is a method to turn ftrace on or off */
48 int ftrace_enabled __read_mostly;
49 static int last_ftrace_enabled;
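/*
 * Note: ftrace_enabled is exposed as the kernel.ftrace_enabled sysctl
 * (see ftrace_enable_sysctl() below).  A typical way to toggle it from
 * user space, assuming procfs is mounted in the usual place:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	(stop callbacks)
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	(re-enable them)
 *
 * last_ftrace_enabled remembers the previous value so the sysctl
 * handler only acts on actual transitions.
 */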
51 /* set when tracing only a pid */
52 struct pid *ftrace_pid_trace;
53 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
55 /* Quick disabling of function tracer. */
56 int function_trace_stop;
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
64 static DEFINE_SPINLOCK(ftrace_lock);
65 static DEFINE_MUTEX(ftrace_sysctl_lock);
66 static DEFINE_MUTEX(ftrace_start_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
74 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
75 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
76 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
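/*
 * ftrace_trace_function is what the architecture's mcount stub calls.
 * __ftrace_trace_function holds the "real" callback when the arch cannot
 * test function_trace_stop itself (see ftrace_test_stop_func() below).
 * ftrace_pid_function holds the saved callback while pid filtering is
 * active and ftrace_pid_func() is interposed in front of it.
 */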
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}
static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between this call and the
 * callbacks actually ceasing to be invoked.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test ftrace_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);
	return 0;
}
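/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * names are made up): a tracer supplies a notrace callback and an
 * ftrace_ops, then goes through the public register_ftrace_function()
 * wrapper below, which takes ftrace_sysctl_lock and ends up here.
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		// called for every traced function; must not recurse
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly =
 *	{
 *		.func = my_trace_func,
 *	};
 *
 *	// in the tracer's init path:	register_ftrace_function(&my_ops);
 *	// on teardown:			unregister_ftrace_function(&my_ops);
 */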
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);
	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
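/*
 * These bits are OR'ed into a single "command" word and handed to
 * ftrace_run_update_code().  For example, ftrace_startup() below passes
 * FTRACE_ENABLE_CALLS, with FTRACE_UPDATE_TRACE_FUNC OR'ed in when
 * ftrace_trace_function has changed since the last update.
 */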
277 static int ftrace_filtered;
279 static LIST_HEAD(ftrace_new_addrs);
281 static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
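/*
 * Rough sizing sketch (the numbers are illustrative assumptions, not
 * guarantees): with 4 KiB pages, a 16-byte struct ftrace_page header
 * and a 32-byte struct dyn_ftrace, ENTRIES_PER_PAGE works out to
 * (4096 - 16) / 32 = 127 records per page, so NR_TO_INIT (10000)
 * records need on the order of 80 pages.
 */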
292 /* estimate from running different kernels */
293 #define NR_TO_INIT 10000
295 static struct ftrace_page *ftrace_pages_start;
296 static struct ftrace_page *ftrace_pages;
298 static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
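/*
 * Freed records are strung onto a simple singly linked free list:
 * rec->ip is reused as the "next" pointer (the record no longer maps to
 * a call site once FTRACE_FL_FREE is set) and ftrace_free_records points
 * at the head.  ftrace_alloc_dyn_node() below pops from this list before
 * carving a new entry out of ftrace_pages.
 */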
338 void ftrace_release(void *start, unsigned long size)
340 struct dyn_ftrace *rec;
341 struct ftrace_page *pg;
342 unsigned long s = (unsigned long)start;
343 unsigned long e = s + size;
346 if (ftrace_disabled || !start)
349 /* should not be called from interrupt context */
350 spin_lock(&ftrace_lock);
352 for (pg = ftrace_pages_start; pg; pg = pg->next) {
353 for (i = 0; i < pg->index; i++) {
354 rec = &pg->records[i];
356 if ((rec->ip >= s) && (rec->ip < e))
357 ftrace_free_rec(rec);
360 spin_unlock(&ftrace_lock);
363 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
365 struct dyn_ftrace *rec;
367 /* First check for freed records */
368 if (ftrace_free_records) {
369 rec = ftrace_free_records;
371 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
372 FTRACE_WARN_ON_ONCE(1);
373 ftrace_free_records = NULL;
377 ftrace_free_records = (void *)rec->ip;
378 memset(rec, 0, sizeof(*rec));
382 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
383 if (!ftrace_pages->next) {
384 /* allocate another page */
386 (void *)get_zeroed_page(GFP_KERNEL);
387 if (!ftrace_pages->next)
390 ftrace_pages = ftrace_pages->next;
393 return &ftrace_pages->records[ftrace_pages->index++];
396 static struct dyn_ftrace *
397 ftrace_record_ip(unsigned long ip)
399 struct dyn_ftrace *rec;
404 rec = ftrace_alloc_dyn_node(ip);
410 list_add(&rec->list, &ftrace_new_addrs);
415 static void print_ip_ins(const char *fmt, unsigned char *p)
419 printk(KERN_CONT "%s", fmt);
421 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
422 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
425 static void ftrace_bug(int failed, unsigned long ip)
429 FTRACE_WARN_ON_ONCE(1);
430 pr_info("ftrace faulted on modifying ");
434 FTRACE_WARN_ON_ONCE(1);
435 pr_info("ftrace failed to modify ");
437 print_ip_ins(" actual: ", (unsigned char *)ip);
438 printk(KERN_CONT "\n");
441 FTRACE_WARN_ON_ONCE(1);
442 pr_info("ftrace faulted on writing ");
446 FTRACE_WARN_ON_ONCE(1);
447 pr_info("ftrace faulted on unknown error ");
454 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
456 unsigned long ip, fl;
457 unsigned long ftrace_addr;
459 ftrace_addr = (unsigned long)FTRACE_ADDR;
464 * If this record is not to be traced and
465 * it is not enabled then do nothing.
467 * If this record is not to be traced and
468 * it is enabled then disable it.
471 if (rec->flags & FTRACE_FL_NOTRACE) {
472 if (rec->flags & FTRACE_FL_ENABLED)
473 rec->flags &= ~FTRACE_FL_ENABLED;
477 } else if (ftrace_filtered && enable) {
482 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
484 /* Record is filtered and enabled, do nothing */
485 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
488 /* Record is not filtered or enabled, do nothing */
492 /* Record is not filtered but enabled, disable it */
493 if (fl == FTRACE_FL_ENABLED)
494 rec->flags &= ~FTRACE_FL_ENABLED;
496 /* Otherwise record is filtered but not enabled, enable it */
497 rec->flags |= FTRACE_FL_ENABLED;
499 /* Disable or not filtered */
502 /* if record is enabled, do nothing */
503 if (rec->flags & FTRACE_FL_ENABLED)
506 rec->flags |= FTRACE_FL_ENABLED;
510 /* if record is not enabled, do nothing */
511 if (!(rec->flags & FTRACE_FL_ENABLED))
514 rec->flags &= ~FTRACE_FL_ENABLED;
518 if (rec->flags & FTRACE_FL_ENABLED)
519 return ftrace_make_call(rec, ftrace_addr);
521 return ftrace_make_nop(NULL, rec, ftrace_addr);
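/*
 * Summary of the decisions above when filtering is active and we are
 * enabling (enable && ftrace_filtered):
 *
 *	FILTER	ENABLED		action
 *	  1	   1		nothing (already traced)
 *	  0	   0		nothing (not wanted)
 *	  0	   1		clear ENABLED (no longer filtered)
 *	  1	   0		set ENABLED (newly filtered)
 *
 * Whatever the resulting ENABLED bit is, the record's call site is then
 * patched to a call (ftrace_make_call) or a nop (ftrace_make_nop).
 */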
524 static void ftrace_replace_code(int enable)
527 struct dyn_ftrace *rec;
528 struct ftrace_page *pg;
530 for (pg = ftrace_pages_start; pg; pg = pg->next) {
531 for (i = 0; i < pg->index; i++) {
532 rec = &pg->records[i];
535 * Skip over free records and records that have
538 if (rec->flags & FTRACE_FL_FREE ||
539 rec->flags & FTRACE_FL_FAILED)
542 /* ignore updates to this record's mcount site */
543 if (get_kprobe((void *)rec->ip)) {
547 unfreeze_record(rec);
550 failed = __ftrace_replace_code(rec, enable);
551 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
552 rec->flags |= FTRACE_FL_FAILED;
553 if ((system_state == SYSTEM_BOOTING) ||
554 !core_kernel_text(rec->ip)) {
555 ftrace_free_rec(rec);
557 ftrace_bug(failed, rec->ip);
564 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
571 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
574 rec->flags |= FTRACE_FL_FAILED;
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
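/*
 * Design note: the call-site patching above rewrites live kernel text,
 * so it is funneled through stop_machine(), which runs
 * __ftrace_modify_code() while every other CPU spins in a known state.
 * That way no CPU can be executing the instructions being modified.
 */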
605 static ftrace_func_t saved_ftrace_func;
606 static int ftrace_start_up;
608 static void ftrace_startup_enable(int command)
610 if (saved_ftrace_func != ftrace_trace_function) {
611 saved_ftrace_func = ftrace_trace_function;
612 command |= FTRACE_UPDATE_TRACE_FUNC;
615 if (!command || !ftrace_enabled)
618 ftrace_run_update_code(command);
621 static void ftrace_startup(int command)
623 if (unlikely(ftrace_disabled))
626 mutex_lock(&ftrace_start_lock);
628 command |= FTRACE_ENABLE_CALLS;
630 ftrace_startup_enable(command);
632 mutex_unlock(&ftrace_start_lock);
635 static void ftrace_shutdown(int command)
637 if (unlikely(ftrace_disabled))
640 mutex_lock(&ftrace_start_lock);
642 if (!ftrace_start_up)
643 command |= FTRACE_DISABLE_CALLS;
645 if (saved_ftrace_func != ftrace_trace_function) {
646 saved_ftrace_func = ftrace_trace_function;
647 command |= FTRACE_UPDATE_TRACE_FUNC;
650 if (!command || !ftrace_enabled)
653 ftrace_run_update_code(command);
655 mutex_unlock(&ftrace_start_lock);
658 static void ftrace_startup_sysctl(void)
660 int command = FTRACE_ENABLE_MCOUNT;
662 if (unlikely(ftrace_disabled))
665 mutex_lock(&ftrace_start_lock);
666 /* Force update next time */
667 saved_ftrace_func = NULL;
668 /* ftrace_start_up is true if we want ftrace running */
670 command |= FTRACE_ENABLE_CALLS;
672 ftrace_run_update_code(command);
673 mutex_unlock(&ftrace_start_lock);
676 static void ftrace_shutdown_sysctl(void)
678 int command = FTRACE_DISABLE_MCOUNT;
680 if (unlikely(ftrace_disabled))
683 mutex_lock(&ftrace_start_lock);
684 /* ftrace_start_up is true if ftrace is running */
686 command |= FTRACE_DISABLE_CALLS;
688 ftrace_run_update_code(command);
689 mutex_unlock(&ftrace_start_lock);
692 static cycle_t ftrace_update_time;
693 static unsigned long ftrace_update_cnt;
694 unsigned long ftrace_update_tot_cnt;
696 static int ftrace_update_code(struct module *mod)
698 struct dyn_ftrace *p, *t;
701 start = ftrace_now(raw_smp_processor_id());
702 ftrace_update_cnt = 0;
704 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
706 /* If something went wrong, bail without enabling anything */
707 if (unlikely(ftrace_disabled))
710 list_del_init(&p->list);
712 /* convert record (i.e, patch mcount-call with NOP) */
713 if (ftrace_code_disable(mod, p)) {
714 p->flags |= FTRACE_FL_CONVERTED;
720 stop = ftrace_now(raw_smp_processor_id());
721 ftrace_update_time = stop - start;
722 ftrace_update_tot_cnt += ftrace_update_cnt;
727 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
729 struct ftrace_page *pg;
733 /* allocate a few pages */
734 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
735 if (!ftrace_pages_start)
739 * Allocate a few more pages.
741 * TODO: have some parser search vmlinux before
742 * final linking to find all calls to ftrace.
744 * a) know how many pages to allocate.
746 * b) set up the table then.
748 * The dynamic code is still necessary for
752 pg = ftrace_pages = ftrace_pages_start;
754 cnt = num_to_init / ENTRIES_PER_PAGE;
755 pr_info("ftrace: allocating %ld entries in %d pages\n",
756 num_to_init, cnt + 1);
758 for (i = 0; i < cnt; i++) {
759 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
761 /* If we fail, we'll try later anyway */
772 FTRACE_ITER_FILTER = (1 << 0),
773 FTRACE_ITER_CONT = (1 << 1),
774 FTRACE_ITER_NOTRACE = (1 << 2),
775 FTRACE_ITER_FAILURES = (1 << 3),
778 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
780 struct ftrace_iterator {
781 struct ftrace_page *pg;
784 unsigned char buffer[FTRACE_BUFF_MAX+1];
790 t_next(struct seq_file *m, void *v, loff_t *pos)
792 struct ftrace_iterator *iter = m->private;
793 struct dyn_ftrace *rec = NULL;
797 /* should not be called from interrupt context */
798 spin_lock(&ftrace_lock);
800 if (iter->idx >= iter->pg->index) {
801 if (iter->pg->next) {
802 iter->pg = iter->pg->next;
809 rec = &iter->pg->records[iter->idx++];
810 if ((rec->flags & FTRACE_FL_FREE) ||
812 (!(iter->flags & FTRACE_ITER_FAILURES) &&
813 (rec->flags & FTRACE_FL_FAILED)) ||
815 ((iter->flags & FTRACE_ITER_FAILURES) &&
816 !(rec->flags & FTRACE_FL_FAILED)) ||
818 ((iter->flags & FTRACE_ITER_FILTER) &&
819 !(rec->flags & FTRACE_FL_FILTER)) ||
821 ((iter->flags & FTRACE_ITER_NOTRACE) &&
822 !(rec->flags & FTRACE_FL_NOTRACE))) {
827 spin_unlock(&ftrace_lock);
832 static void *t_start(struct seq_file *m, loff_t *pos)
834 struct ftrace_iterator *iter = m->private;
844 p = t_next(m, p, pos);
849 static void t_stop(struct seq_file *m, void *p)
853 static int t_show(struct seq_file *m, void *v)
855 struct dyn_ftrace *rec = v;
856 char str[KSYM_SYMBOL_LEN];
861 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
863 seq_printf(m, "%s\n", str);
868 static struct seq_operations show_ftrace_seq_ops = {
876 ftrace_avail_open(struct inode *inode, struct file *file)
878 struct ftrace_iterator *iter;
881 if (unlikely(ftrace_disabled))
884 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
888 iter->pg = ftrace_pages_start;
890 ret = seq_open(file, &show_ftrace_seq_ops);
892 struct seq_file *m = file->private_data;
902 int ftrace_avail_release(struct inode *inode, struct file *file)
904 struct seq_file *m = (struct seq_file *)file->private_data;
905 struct ftrace_iterator *iter = m->private;
907 seq_release(inode, file);
914 ftrace_failures_open(struct inode *inode, struct file *file)
918 struct ftrace_iterator *iter;
920 ret = ftrace_avail_open(inode, file);
922 m = (struct seq_file *)file->private_data;
923 iter = (struct ftrace_iterator *)m->private;
924 iter->flags = FTRACE_ITER_FAILURES;
931 static void ftrace_filter_reset(int enable)
933 struct ftrace_page *pg;
934 struct dyn_ftrace *rec;
935 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
938 /* should not be called from interrupt context */
939 spin_lock(&ftrace_lock);
942 pg = ftrace_pages_start;
944 for (i = 0; i < pg->index; i++) {
945 rec = &pg->records[i];
946 if (rec->flags & FTRACE_FL_FAILED)
952 spin_unlock(&ftrace_lock);
956 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
958 struct ftrace_iterator *iter;
961 if (unlikely(ftrace_disabled))
964 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
968 mutex_lock(&ftrace_regex_lock);
969 if ((file->f_mode & FMODE_WRITE) &&
970 !(file->f_flags & O_APPEND))
971 ftrace_filter_reset(enable);
973 if (file->f_mode & FMODE_READ) {
974 iter->pg = ftrace_pages_start;
975 iter->flags = enable ? FTRACE_ITER_FILTER :
978 ret = seq_open(file, &show_ftrace_seq_ops);
980 struct seq_file *m = file->private_data;
985 file->private_data = iter;
986 mutex_unlock(&ftrace_regex_lock);
992 ftrace_filter_open(struct inode *inode, struct file *file)
994 return ftrace_regex_open(inode, file, 1);
998 ftrace_notrace_open(struct inode *inode, struct file *file)
1000 return ftrace_regex_open(inode, file, 0);
1004 ftrace_regex_read(struct file *file, char __user *ubuf,
1005 size_t cnt, loff_t *ppos)
1007 if (file->f_mode & FMODE_READ)
1008 return seq_read(file, ubuf, cnt, ppos);
1014 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1018 if (file->f_mode & FMODE_READ)
1019 ret = seq_lseek(file, offset, origin);
1021 file->f_pos = ret = 1;
1034 ftrace_match(unsigned char *buff, int len, int enable)
1036 char str[KSYM_SYMBOL_LEN];
1037 char *search = NULL;
1038 struct ftrace_page *pg;
1039 struct dyn_ftrace *rec;
1040 int type = MATCH_FULL;
1041 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1042 unsigned i, match = 0, search_len = 0;
1045 if (buff[0] == '!') {
1051 for (i = 0; i < len; i++) {
1052 if (buff[i] == '*') {
1054 search = buff + i + 1;
1055 type = MATCH_END_ONLY;
1056 search_len = len - (i + 1);
1058 if (type == MATCH_END_ONLY) {
1059 type = MATCH_MIDDLE_ONLY;
1062 type = MATCH_FRONT_ONLY;
1070 /* should not be called from interrupt context */
1071 spin_lock(&ftrace_lock);
1073 ftrace_filtered = 1;
1074 pg = ftrace_pages_start;
1076 for (i = 0; i < pg->index; i++) {
1080 rec = &pg->records[i];
1081 if (rec->flags & FTRACE_FL_FAILED)
1083 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1086 if (strcmp(str, buff) == 0)
1089 case MATCH_FRONT_ONLY:
1090 if (memcmp(str, buff, match) == 0)
1093 case MATCH_MIDDLE_ONLY:
1094 if (strstr(str, search))
1097 case MATCH_END_ONLY:
1098 ptr = strstr(str, search);
1099 if (ptr && (ptr[search_len] == 0))
1105 rec->flags &= ~flag;
1112 spin_unlock(&ftrace_lock);
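/*
 * ftrace_match() above accepts simple globs:
 *	"foo"		MATCH_FULL		exact symbol name
 *	"foo*"		MATCH_FRONT_ONLY	prefix match
 *	"*foo"		MATCH_END_ONLY		suffix match
 *	"*foo*"		MATCH_MIDDLE_ONLY	substring match
 * A leading '!' means "clear the flag" for matching records instead of
 * setting it.
 */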
1116 ftrace_regex_write(struct file *file, const char __user *ubuf,
1117 size_t cnt, loff_t *ppos, int enable)
1119 struct ftrace_iterator *iter;
1124 if (!cnt || cnt < 0)
1127 mutex_lock(&ftrace_regex_lock);
1129 if (file->f_mode & FMODE_READ) {
1130 struct seq_file *m = file->private_data;
1133 iter = file->private_data;
1136 iter->flags &= ~FTRACE_ITER_CONT;
1137 iter->buffer_idx = 0;
1140 ret = get_user(ch, ubuf++);
1146 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1147 /* skip white space */
1148 while (cnt && isspace(ch)) {
1149 ret = get_user(ch, ubuf++);
1157 file->f_pos += read;
1162 iter->buffer_idx = 0;
1165 while (cnt && !isspace(ch)) {
1166 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1167 iter->buffer[iter->buffer_idx++] = ch;
1172 ret = get_user(ch, ubuf++);
1181 iter->buffer[iter->buffer_idx] = 0;
1182 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1183 iter->buffer_idx = 0;
1185 iter->flags |= FTRACE_ITER_CONT;
1188 file->f_pos += read;
1192 mutex_unlock(&ftrace_regex_lock);
1198 ftrace_filter_write(struct file *file, const char __user *ubuf,
1199 size_t cnt, loff_t *ppos)
1201 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1205 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1206 size_t cnt, loff_t *ppos)
1208 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1212 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1214 if (unlikely(ftrace_disabled))
1217 mutex_lock(&ftrace_regex_lock);
1219 ftrace_filter_reset(enable);
1221 ftrace_match(buf, len, enable);
1222 mutex_unlock(&ftrace_regex_lock);
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
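/*
 * In-kernel usage sketch (illustrative only): a tracer can restrict
 * function tracing before enabling it, e.g.
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *
 * which resets any existing filter and then traces only schedule().
 * A glob such as "sched_*" works the same way via ftrace_match().
 */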
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1255 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1257 struct seq_file *m = (struct seq_file *)file->private_data;
1258 struct ftrace_iterator *iter;
1260 mutex_lock(&ftrace_regex_lock);
1261 if (file->f_mode & FMODE_READ) {
1264 seq_release(inode, file);
1266 iter = file->private_data;
1268 if (iter->buffer_idx) {
1270 iter->buffer[iter->buffer_idx] = 0;
1271 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1274 mutex_lock(&ftrace_sysctl_lock);
1275 mutex_lock(&ftrace_start_lock);
1276 if (ftrace_start_up && ftrace_enabled)
1277 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1278 mutex_unlock(&ftrace_start_lock);
1279 mutex_unlock(&ftrace_sysctl_lock);
1282 mutex_unlock(&ftrace_regex_lock);
1287 ftrace_filter_release(struct inode *inode, struct file *file)
1289 return ftrace_regex_release(inode, file, 1);
1293 ftrace_notrace_release(struct inode *inode, struct file *file)
1295 return ftrace_regex_release(inode, file, 0);
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
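/*
 * Typical user-space view of the files wired up above (the paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	cat  /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'schedule'      > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'sys_nanosleep' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Writing '!pattern' removes a previously set pattern, and opening a
 * file for write without O_APPEND clears the existing list first (see
 * ftrace_regex_open()).
 */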
1328 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1330 static DEFINE_MUTEX(graph_lock);
1332 int ftrace_graph_count;
1333 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1336 g_next(struct seq_file *m, void *v, loff_t *pos)
1338 unsigned long *array = m->private;
1343 if (index >= ftrace_graph_count)
1346 return &array[index];
1349 static void *g_start(struct seq_file *m, loff_t *pos)
1353 mutex_lock(&graph_lock);
1355 p = g_next(m, p, pos);
1360 static void g_stop(struct seq_file *m, void *p)
1362 mutex_unlock(&graph_lock);
1365 static int g_show(struct seq_file *m, void *v)
1367 unsigned long *ptr = v;
1368 char str[KSYM_SYMBOL_LEN];
1373 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1375 seq_printf(m, "%s\n", str);
1380 static struct seq_operations ftrace_graph_seq_ops = {
1388 ftrace_graph_open(struct inode *inode, struct file *file)
1392 if (unlikely(ftrace_disabled))
1395 mutex_lock(&graph_lock);
1396 if ((file->f_mode & FMODE_WRITE) &&
1397 !(file->f_flags & O_APPEND)) {
1398 ftrace_graph_count = 0;
1399 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1402 if (file->f_mode & FMODE_READ) {
1403 ret = seq_open(file, &ftrace_graph_seq_ops);
1405 struct seq_file *m = file->private_data;
1406 m->private = ftrace_graph_funcs;
1409 file->private_data = ftrace_graph_funcs;
1410 mutex_unlock(&graph_lock);
1416 ftrace_graph_read(struct file *file, char __user *ubuf,
1417 size_t cnt, loff_t *ppos)
1419 if (file->f_mode & FMODE_READ)
1420 return seq_read(file, ubuf, cnt, ppos);
1426 ftrace_set_func(unsigned long *array, int idx, char *buffer)
1428 char str[KSYM_SYMBOL_LEN];
1429 struct dyn_ftrace *rec;
1430 struct ftrace_page *pg;
1434 if (ftrace_disabled)
1437 /* should not be called from interrupt context */
1438 spin_lock(&ftrace_lock);
1440 for (pg = ftrace_pages_start; pg; pg = pg->next) {
1441 for (i = 0; i < pg->index; i++) {
1442 rec = &pg->records[i];
1444 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1447 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1448 if (strcmp(str, buffer) == 0) {
1450 for (j = 0; j < idx; j++)
1451 if (array[j] == rec->ip) {
1456 array[idx] = rec->ip;
1461 spin_unlock(&ftrace_lock);
1463 return found ? 0 : -EINVAL;
1467 ftrace_graph_write(struct file *file, const char __user *ubuf,
1468 size_t cnt, loff_t *ppos)
1470 unsigned char buffer[FTRACE_BUFF_MAX+1];
1471 unsigned long *array;
1477 if (!cnt || cnt < 0)
1480 mutex_lock(&graph_lock);
1482 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1487 if (file->f_mode & FMODE_READ) {
1488 struct seq_file *m = file->private_data;
1491 array = file->private_data;
1493 ret = get_user(ch, ubuf++);
1499 /* skip white space */
1500 while (cnt && isspace(ch)) {
1501 ret = get_user(ch, ubuf++);
1514 while (cnt && !isspace(ch)) {
1515 if (index < FTRACE_BUFF_MAX)
1516 buffer[index++] = ch;
1521 ret = get_user(ch, ubuf++);
1529 /* we allow only one at a time */
1530 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1534 ftrace_graph_count++;
1536 file->f_pos += read;
1540 mutex_unlock(&graph_lock);
1545 static const struct file_operations ftrace_graph_fops = {
1546 .open = ftrace_graph_open,
1547 .read = ftrace_graph_read,
1548 .write = ftrace_graph_write,
1550 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1552 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
1554 struct dentry *entry;
1556 entry = debugfs_create_file("available_filter_functions", 0444,
1557 d_tracer, NULL, &ftrace_avail_fops);
1559 pr_warning("Could not create debugfs "
1560 "'available_filter_functions' entry\n");
1562 entry = debugfs_create_file("failures", 0444,
1563 d_tracer, NULL, &ftrace_failures_fops);
1565 pr_warning("Could not create debugfs 'failures' entry\n");
1567 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1568 NULL, &ftrace_filter_fops);
1570 pr_warning("Could not create debugfs "
1571 "'set_ftrace_filter' entry\n");
1573 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1574 NULL, &ftrace_notrace_fops);
1576 pr_warning("Could not create debugfs "
1577 "'set_ftrace_notrace' entry\n");
1579 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1580 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
1582 &ftrace_graph_fops);
1584 pr_warning("Could not create debugfs "
1585 "'set_graph_function' entry\n");
1586 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1591 static int ftrace_convert_nops(struct module *mod,
1592 unsigned long *start,
1597 unsigned long flags;
1599 mutex_lock(&ftrace_start_lock);
1602 addr = ftrace_call_adjust(*p++);
1604 * Some architecture linkers will pad between
1605 * the different mcount_loc sections of different
1606 * object files to satisfy alignments.
1607 * Skip any NULL pointers.
1611 ftrace_record_ip(addr);
1614 /* disable interrupts to prevent kstop machine */
1615 local_irq_save(flags);
1616 ftrace_update_code(mod);
1617 local_irq_restore(flags);
1618 mutex_unlock(&ftrace_start_lock);
1623 void ftrace_init_module(struct module *mod,
1624 unsigned long *start, unsigned long *end)
1626 if (ftrace_disabled || start == end)
1628 ftrace_convert_nops(mod, start, end);
1631 extern unsigned long __start_mcount_loc[];
1632 extern unsigned long __stop_mcount_loc[];
1634 void __init ftrace_init(void)
1636 unsigned long count, addr, flags;
1639 /* Keep the ftrace pointer to the stub */
1640 addr = (unsigned long)ftrace_stub;
1642 local_irq_save(flags);
1643 ftrace_dyn_arch_init(&addr);
1644 local_irq_restore(flags);
1646 /* ftrace_dyn_arch_init places the return code in addr */
1650 count = __stop_mcount_loc - __start_mcount_loc;
1652 ret = ftrace_dyn_table_alloc(count);
1656 last_ftrace_enabled = ftrace_enabled = 1;
1658 ret = ftrace_convert_nops(NULL,
1664 ftrace_disabled = 1;
1669 static int __init ftrace_nodyn_init(void)
1674 device_initcall(ftrace_nodyn_init);
1676 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
1677 static inline void ftrace_startup_enable(int command) { }
1678 /* Keep as macros so we do not need to define the commands */
1679 # define ftrace_startup(command) do { } while (0)
1680 # define ftrace_shutdown(command) do { } while (0)
1681 # define ftrace_startup_sysctl() do { } while (0)
1682 # define ftrace_shutdown_sysctl() do { } while (0)
1683 #endif /* CONFIG_DYNAMIC_FTRACE */
1686 ftrace_pid_read(struct file *file, char __user *ubuf,
1687 size_t cnt, loff_t *ppos)
1692 if (ftrace_pid_trace == ftrace_swapper_pid)
1693 r = sprintf(buf, "swapper tasks\n");
1694 else if (ftrace_pid_trace)
1695 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
1697 r = sprintf(buf, "no pid\n");
1699 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1702 static void clear_ftrace_swapper(void)
1704 struct task_struct *p;
1708 for_each_online_cpu(cpu) {
1710 clear_tsk_trace_trace(p);
1715 static void set_ftrace_swapper(void)
1717 struct task_struct *p;
1721 for_each_online_cpu(cpu) {
1723 set_tsk_trace_trace(p);
1728 static void clear_ftrace_pid(struct pid *pid)
1730 struct task_struct *p;
1733 do_each_pid_task(pid, PIDTYPE_PID, p) {
1734 clear_tsk_trace_trace(p);
1735 } while_each_pid_task(pid, PIDTYPE_PID, p);
1741 static void set_ftrace_pid(struct pid *pid)
1743 struct task_struct *p;
1746 do_each_pid_task(pid, PIDTYPE_PID, p) {
1747 set_tsk_trace_trace(p);
1748 } while_each_pid_task(pid, PIDTYPE_PID, p);
1752 static void clear_ftrace_pid_task(struct pid **pid)
1754 if (*pid == ftrace_swapper_pid)
1755 clear_ftrace_swapper();
1757 clear_ftrace_pid(*pid);
1762 static void set_ftrace_pid_task(struct pid *pid)
1764 if (pid == ftrace_swapper_pid)
1765 set_ftrace_swapper();
1767 set_ftrace_pid(pid);
1771 ftrace_pid_write(struct file *filp, const char __user *ubuf,
1772 size_t cnt, loff_t *ppos)
1779 if (cnt >= sizeof(buf))
1782 if (copy_from_user(&buf, ubuf, cnt))
1787 ret = strict_strtol(buf, 10, &val);
1791 mutex_lock(&ftrace_start_lock);
1793 /* disable pid tracing */
1794 if (!ftrace_pid_trace)
1797 clear_ftrace_pid_task(&ftrace_pid_trace);
1800 /* swapper task is special */
1802 pid = ftrace_swapper_pid;
1803 if (pid == ftrace_pid_trace)
1806 pid = find_get_pid(val);
1808 if (pid == ftrace_pid_trace) {
1814 if (ftrace_pid_trace)
1815 clear_ftrace_pid_task(&ftrace_pid_trace);
1820 ftrace_pid_trace = pid;
1822 set_ftrace_pid_task(ftrace_pid_trace);
1825 /* update the function call */
1826 ftrace_update_pid_func();
1827 ftrace_startup_enable(0);
1830 mutex_unlock(&ftrace_start_lock);
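/*
 * Sketch of the set_ftrace_pid semantics implemented above (paths
 * assume debugfs mounted at /sys/kernel/debug):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid   (trace pid 1234)
 *	echo 0    > /sys/kernel/debug/tracing/set_ftrace_pid   (trace the idle/swapper tasks)
 *	echo -1   > /sys/kernel/debug/tracing/set_ftrace_pid   (no pid filtering)
 */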
1835 static struct file_operations ftrace_pid_fops = {
1836 .read = ftrace_pid_read,
1837 .write = ftrace_pid_write,
1840 static __init int ftrace_init_debugfs(void)
1842 struct dentry *d_tracer;
1843 struct dentry *entry;
1845 d_tracer = tracing_init_dentry();
1849 ftrace_init_dyn_debugfs(d_tracer);
1851 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
1852 NULL, &ftrace_pid_fops);
1854 pr_warning("Could not create debugfs "
1855 "'set_ftrace_pid' entry\n");
1859 fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
1876 * register_ftrace_function - register a function for profiling
1877 * @ops - ops structure that holds the function for profiling.
1879 * Register a function to be called by all functions in the
1882 * Note: @ops->func and all the functions it calls must be labeled
1883 * with "notrace", otherwise it will go into a
1886 int register_ftrace_function(struct ftrace_ops *ops)
1890 if (unlikely(ftrace_disabled))
1893 mutex_lock(&ftrace_sysctl_lock);
1895 ret = __register_ftrace_function(ops);
1898 mutex_unlock(&ftrace_sysctl_lock);
1903 * unregister_ftrace_function - unregister a function for profiling.
1904 * @ops - ops structure that holds the function to unregister
1906 * Unregister a function that was added to be called by ftrace profiling.
1908 int unregister_ftrace_function(struct ftrace_ops *ops)
1912 mutex_lock(&ftrace_sysctl_lock);
1913 ret = __unregister_ftrace_function(ops);
1915 mutex_unlock(&ftrace_sysctl_lock);
1921 ftrace_enable_sysctl(struct ctl_table *table, int write,
1922 struct file *file, void __user *buffer, size_t *lenp,
1927 if (unlikely(ftrace_disabled))
1930 mutex_lock(&ftrace_sysctl_lock);
1932 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1934 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1937 last_ftrace_enabled = ftrace_enabled;
1939 if (ftrace_enabled) {
1941 ftrace_startup_sysctl();
1943 /* we are starting ftrace again */
1944 if (ftrace_list != &ftrace_list_end) {
1945 if (ftrace_list->next == &ftrace_list_end)
1946 ftrace_trace_function = ftrace_list->func;
1948 ftrace_trace_function = ftrace_list_func;
1952 /* stopping ftrace calls (just send to ftrace_stub) */
1953 ftrace_trace_function = ftrace_stub;
1955 ftrace_shutdown_sysctl();
1959 mutex_unlock(&ftrace_sysctl_lock);
1963 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1965 static atomic_t ftrace_graph_active;
1966 static struct notifier_block ftrace_suspend_notifier;
1968 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1973 /* The callbacks that hook a function */
1974 trace_func_graph_ret_t ftrace_graph_return =
1975 (trace_func_graph_ret_t)ftrace_stub;
1976 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1978 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1979 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1983 unsigned long flags;
1984 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1985 struct task_struct *g, *t;
1987 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1988 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
1989 * sizeof(struct ftrace_ret_stack),
1991 if (!ret_stack_list[i]) {
1999 read_lock_irqsave(&tasklist_lock, flags);
2000 do_each_thread(g, t) {
2006 if (t->ret_stack == NULL) {
2007 t->curr_ret_stack = -1;
2008 /* Make sure IRQs see the -1 first: */
2010 t->ret_stack = ret_stack_list[start++];
2011 atomic_set(&t->tracing_graph_pause, 0);
2012 atomic_set(&t->trace_overrun, 0);
2014 } while_each_thread(g, t);
2017 read_unlock_irqrestore(&tasklist_lock, flags);
2019 for (i = start; i < end; i++)
2020 kfree(ret_stack_list[i]);
2024 /* Allocate a return stack for each task */
2025 static int start_graph_tracing(void)
2027 struct ftrace_ret_stack **ret_stack_list;
2030 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2031 sizeof(struct ftrace_ret_stack *),
2034 if (!ret_stack_list)
2038 ret = alloc_retstack_tasklist(ret_stack_list);
2039 } while (ret == -EAGAIN);
2041 kfree(ret_stack_list);
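/*
 * start_graph_tracing() allocates return stacks in batches of
 * FTRACE_RETSTACK_ALLOC_SIZE: alloc_retstack_tasklist() hands the
 * preallocated stacks to tasks while holding tasklist_lock and returns
 * -EAGAIN when the batch ran out before covering every thread, so the
 * loop above simply retries with a fresh batch.
 */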
2046 * Hibernation protection.
2047 * The state of the current task is too much unstable during
2048 * suspend/restore to disk. We want to protect against that.
2051 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2055 case PM_HIBERNATION_PREPARE:
2056 pause_graph_tracing();
2059 case PM_POST_HIBERNATION:
2060 unpause_graph_tracing();
2066 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2067 trace_func_graph_ent_t entryfunc)
2071 mutex_lock(&ftrace_sysctl_lock);
2073 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2074 register_pm_notifier(&ftrace_suspend_notifier);
2076 atomic_inc(&ftrace_graph_active);
2077 ret = start_graph_tracing();
2079 atomic_dec(&ftrace_graph_active);
2083 ftrace_graph_return = retfunc;
2084 ftrace_graph_entry = entryfunc;
2086 ftrace_startup(FTRACE_START_FUNC_RET);
2089 mutex_unlock(&ftrace_sysctl_lock);
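/*
 * Usage sketch (illustrative only, names made up): the function graph
 * tracer registers an entry and a return handler in one call; both
 * callbacks may run from any traced function.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: trace this function, 0: skip it
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	// register_ftrace_graph(my_graph_return, my_graph_entry);
 *	// ...
 *	// unregister_ftrace_graph();
 */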
2093 void unregister_ftrace_graph(void)
2095 mutex_lock(&ftrace_sysctl_lock);
2097 atomic_dec(&ftrace_graph_active);
2098 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2099 ftrace_graph_entry = ftrace_graph_entry_stub;
2100 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2101 unregister_pm_notifier(&ftrace_suspend_notifier);
2103 mutex_unlock(&ftrace_sysctl_lock);
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}