2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
31 #include <asm/ftrace.h>
35 /* ftrace_enabled is the knob that turns ftrace on or off */
36 int ftrace_enabled __read_mostly;
37 static int last_ftrace_enabled;
40 * ftrace_disabled is set when an anomaly is discovered.
41 * ftrace_disabled is much stronger than ftrace_enabled.
43 static int ftrace_disabled __read_mostly;
45 static DEFINE_SPINLOCK(ftrace_lock);
46 static DEFINE_MUTEX(ftrace_sysctl_lock);
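/*
 * Locking overview (summary of how these locks are used below): ftrace_lock
 * guards updates to the ftrace_list callback chain, while ftrace_sysctl_lock
 * serializes the higher-level enable/disable paths (register/unregister, the
 * sysctl handler and ftrace_kill()).
 */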
48 static struct ftrace_ops ftrace_list_end __read_mostly =
53 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
54 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
56 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
58 struct ftrace_ops *op = ftrace_list;
60 /* in case someone actually ports this to alpha! */
61 read_barrier_depends();
63 while (op != &ftrace_list_end) {
65 read_barrier_depends();
66 op->func(ip, parent_ip);
72 * clear_ftrace_function - reset the ftrace function
74 * This NULLs the ftrace function and in essence stops
75 * tracing. There may be a lag before all CPUs stop calling the old function.
77 void clear_ftrace_function(void)
79 ftrace_trace_function = ftrace_stub;
82 static int __register_ftrace_function(struct ftrace_ops *ops)
84 /* Should never be called from interrupt context */
85 spin_lock(&ftrace_lock);
87 ops->next = ftrace_list;
89 * We are entering ops into the ftrace_list but another
90 * CPU might be walking that list. We need to make sure
91 * the ops->next pointer is valid before another CPU sees
92 * the ops pointer included into the ftrace_list.
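 * (In the full source a write barrier, smp_wmb(), enforces this ordering;
 * it pairs with the read_barrier_depends() in ftrace_list_func() above.)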
99 * For one func, simply call it directly.
100 * For more than one func, call the chain.
102 if (ops->next == &ftrace_list_end)
103 ftrace_trace_function = ops->func;
105 ftrace_trace_function = ftrace_list_func;
108 spin_unlock(&ftrace_lock);
113 static int __unregister_ftrace_function(struct ftrace_ops *ops)
115 struct ftrace_ops **p;
118 spin_lock(&ftrace_lock);
121 * If we are removing the last function, then simply point
122 * to the ftrace_stub.
124 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
125 ftrace_trace_function = ftrace_stub;
126 ftrace_list = &ftrace_list_end;
130 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
141 if (ftrace_enabled) {
142 /* If we only have one func left, then call that directly */
143 if (ftrace_list == &ftrace_list_end ||
144 ftrace_list->next == &ftrace_list_end)
145 ftrace_trace_function = ftrace_list->func;
149 spin_unlock(&ftrace_lock);
154 #ifdef CONFIG_DYNAMIC_FTRACE
156 static struct task_struct *ftraced_task;
159 FTRACE_ENABLE_CALLS = (1 << 0),
160 FTRACE_DISABLE_CALLS = (1 << 1),
161 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
162 FTRACE_ENABLE_MCOUNT = (1 << 3),
163 FTRACE_DISABLE_MCOUNT = (1 << 4),
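/*
 * These bits are OR'd into a command word that __ftrace_modify_code()
 * interprets while the machine is stopped (see ftrace_run_update_code()
 * below), so several updates can be batched into a single stop_machine()
 * call.
 */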
166 static int ftrace_filtered;
167 static int tracing_on;
168 static int frozen_record_count;
170 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
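/*
 * Every mcount call site recorded at run time is hashed by its instruction
 * pointer, hash_long(ip, FTRACE_HASHBITS), into one of these buckets.
 * ftrace_record_ip() uses the hash to detect sites it has already seen,
 * and skip_trace() uses it to look up frozen records.
 */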
172 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
174 static DEFINE_SPINLOCK(ftrace_shutdown_lock);
175 static DEFINE_MUTEX(ftraced_lock);
176 static DEFINE_MUTEX(ftrace_regex_lock);
179 struct ftrace_page *next;
181 struct dyn_ftrace records[];
184 #define ENTRIES_PER_PAGE \
185 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
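/*
 * Rough sizing sketch (assumed sizes, not taken from this file): with
 * 4096-byte pages and struct dyn_ftrace around 16 bytes, each page holds
 * roughly (4096 - sizeof(struct ftrace_page)) / 16, i.e. ~250 records,
 * so NR_TO_INIT (10000) below needs on the order of 40 pages at boot.
 */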
187 /* estimate from running different kernels */
188 #define NR_TO_INIT 10000
190 static struct ftrace_page *ftrace_pages_start;
191 static struct ftrace_page *ftrace_pages;
193 static int ftraced_trigger;
194 static int ftraced_suspend;
195 static int ftraced_stop;
197 static int ftrace_record_suspend;
199 static struct dyn_ftrace *ftrace_free_records;
202 #ifdef CONFIG_KPROBES
203 static inline void freeze_record(struct dyn_ftrace *rec)
205 if (!(rec->flags & FTRACE_FL_FROZEN)) {
206 rec->flags |= FTRACE_FL_FROZEN;
207 frozen_record_count++;
211 static inline void unfreeze_record(struct dyn_ftrace *rec)
213 if (rec->flags & FTRACE_FL_FROZEN) {
214 rec->flags &= ~FTRACE_FL_FROZEN;
215 frozen_record_count--;
219 static inline int record_frozen(struct dyn_ftrace *rec)
221 return rec->flags & FTRACE_FL_FROZEN;
224 # define freeze_record(rec) ({ 0; })
225 # define unfreeze_record(rec) ({ 0; })
226 # define record_frozen(rec) ({ 0; })
227 #endif /* CONFIG_KPROBES */
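/*
 * Rationale for freezing: a kprobe may have replaced the mcount site of a
 * function with its own breakpoint, so ftrace must not patch that site
 * underneath the kprobe. Frozen sites cannot be patched; skip_trace() below
 * therefore applies the enable/filter decisions for them at run time.
 */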
229 int skip_trace(unsigned long ip)
232 struct dyn_ftrace *rec;
233 struct hlist_node *t;
234 struct hlist_head *head;
236 if (frozen_record_count == 0)
239 head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
240 hlist_for_each_entry_rcu(rec, t, head, node) {
242 if (record_frozen(rec)) {
243 if (rec->flags & FTRACE_FL_FAILED)
246 if (!(rec->flags & FTRACE_FL_CONVERTED))
249 if (!tracing_on || !ftrace_enabled)
252 if (ftrace_filtered) {
253 fl = rec->flags & (FTRACE_FL_FILTER |
255 if (!fl || (fl & FTRACE_FL_NOTRACE))
267 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
269 struct dyn_ftrace *p;
270 struct hlist_node *t;
273 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
284 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
286 hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
289 /* called from kstop_machine */
290 static inline void ftrace_del_hash(struct dyn_ftrace *node)
292 hlist_del(&node->node);
295 static void ftrace_free_rec(struct dyn_ftrace *rec)
297 /* no locking, only called from kstop_machine */
299 rec->ip = (unsigned long)ftrace_free_records;
300 ftrace_free_records = rec;
301 rec->flags |= FTRACE_FL_FREE;
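/*
 * Freed records form a simple singly linked free list: the ip field of a
 * freed record doubles as the "next free" pointer, which
 * ftrace_alloc_dyn_node() pops off before carving new entries out of
 * ftrace_pages.
 */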
304 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
306 struct dyn_ftrace *rec;
308 /* First check for freed records */
309 if (ftrace_free_records) {
310 rec = ftrace_free_records;
312 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
314 ftrace_free_records = NULL;
320 ftrace_free_records = (void *)rec->ip;
321 memset(rec, 0, sizeof(*rec));
325 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
326 if (!ftrace_pages->next)
328 ftrace_pages = ftrace_pages->next;
331 return &ftrace_pages->records[ftrace_pages->index++];
335 ftrace_record_ip(unsigned long ip)
337 struct dyn_ftrace *node;
344 if (!ftrace_enabled || ftrace_disabled)
347 resched = need_resched();
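	/* Remember whether a reschedule was already pending: preemption is
	 * re-enabled below without rescheduling in that case, so this hook
	 * never recurses into the (itself traced) scheduler. */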
348 preempt_disable_notrace();
351 * We simply need to protect against recursion.
352 * Use the raw version of smp_processor_id and not
353 * __get_cpu_var which can call debug hooks that can
354 * cause a recursive crash here.
356 cpu = raw_smp_processor_id();
357 per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
358 if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
361 if (unlikely(ftrace_record_suspend))
364 key = hash_long(ip, FTRACE_HASHBITS);
366 WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
368 if (ftrace_ip_in_hash(ip, key))
371 atomic = irqs_disabled();
373 spin_lock_irqsave(&ftrace_shutdown_lock, flags);
375 /* This ip may have hit the hash before the lock */
376 if (ftrace_ip_in_hash(ip, key))
379 node = ftrace_alloc_dyn_node(ip);
385 ftrace_add_hash(node, key);
390 spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
392 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
394 /* prevent recursion with scheduler */
396 preempt_enable_no_resched_notrace();
398 preempt_enable_notrace();
401 #define FTRACE_ADDR ((long)(ftrace_caller))
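/*
 * FTRACE_ADDR is the arch trampoline (ftrace_caller) that enabled call
 * sites are patched to call. __ftrace_replace_code() below builds the
 * "old" and "new" instruction sequences with ftrace_call_replace() and
 * ftrace_nop_replace() and lets ftrace_modify_code() swap them, one
 * record at a time.
 */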
404 __ftrace_replace_code(struct dyn_ftrace *rec,
405 unsigned char *old, unsigned char *new, int enable)
407 unsigned long ip, fl;
411 if (ftrace_filtered && enable) {
413 * If filtering is on:
415 * If this record is set to be filtered and
416 * is enabled then do nothing.
418 * If this record is set to be filtered and
419 * it is not enabled, enable it.
421 * If this record is not set to be filtered
422 * and it is not enabled do nothing.
424 * If this record is set not to trace, then do nothing.
427 * If this record is set not to trace and
428 * it is enabled then disable it.
430 * If this record is not set to be filtered and
431 * it is enabled, disable it.
434 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
437 if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
438 (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
439 !fl || (fl == FTRACE_FL_NOTRACE))
443 * If it is enabled disable it,
444 * otherwise enable it!
446 if (fl & FTRACE_FL_ENABLED) {
447 /* swap new and old */
449 old = ftrace_call_replace(ip, FTRACE_ADDR);
450 rec->flags &= ~FTRACE_FL_ENABLED;
452 new = ftrace_call_replace(ip, FTRACE_ADDR);
453 rec->flags |= FTRACE_FL_ENABLED;
459 * If this record is set not to trace and is
460 * not enabled, do nothing.
462 fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
463 if (fl == FTRACE_FL_NOTRACE)
466 new = ftrace_call_replace(ip, FTRACE_ADDR);
468 old = ftrace_call_replace(ip, FTRACE_ADDR);
471 if (rec->flags & FTRACE_FL_ENABLED)
473 rec->flags |= FTRACE_FL_ENABLED;
475 if (!(rec->flags & FTRACE_FL_ENABLED))
477 rec->flags &= ~FTRACE_FL_ENABLED;
481 return ftrace_modify_code(ip, old, new);
484 static void ftrace_replace_code(int enable)
487 unsigned char *new = NULL, *old = NULL;
488 struct dyn_ftrace *rec;
489 struct ftrace_page *pg;
492 old = ftrace_nop_replace();
494 new = ftrace_nop_replace();
496 for (pg = ftrace_pages_start; pg; pg = pg->next) {
497 for (i = 0; i < pg->index; i++) {
498 rec = &pg->records[i];
500 /* don't modify code that has already faulted */
501 if (rec->flags & FTRACE_FL_FAILED)
504 /* ignore updates to this record's mcount site */
505 if (get_kprobe((void *)rec->ip)) {
509 unfreeze_record(rec);
512 failed = __ftrace_replace_code(rec, old, new, enable);
513 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
514 rec->flags |= FTRACE_FL_FAILED;
515 if ((system_state == SYSTEM_BOOTING) ||
516 !core_kernel_text(rec->ip)) {
517 ftrace_del_hash(rec);
518 ftrace_free_rec(rec);
525 static void ftrace_shutdown_replenish(void)
527 if (ftrace_pages->next)
530 /* allocate another page */
531 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
535 ftrace_code_disable(struct dyn_ftrace *rec)
538 unsigned char *nop, *call;
543 nop = ftrace_nop_replace();
544 call = ftrace_call_replace(ip, MCOUNT_ADDR);
546 failed = ftrace_modify_code(ip, call, nop);
548 rec->flags |= FTRACE_FL_FAILED;
554 static int __ftrace_update_code(void *ignore);
556 static int __ftrace_modify_code(void *data)
561 if (*command & FTRACE_ENABLE_CALLS) {
563 * Update any recorded ips now that we have the machine stopped
566 __ftrace_update_code(NULL);
567 ftrace_replace_code(1);
569 } else if (*command & FTRACE_DISABLE_CALLS) {
570 ftrace_replace_code(0);
574 if (*command & FTRACE_UPDATE_TRACE_FUNC)
575 ftrace_update_ftrace_func(ftrace_trace_function);
577 if (*command & FTRACE_ENABLE_MCOUNT) {
578 addr = (unsigned long)ftrace_record_ip;
579 ftrace_mcount_set(&addr);
580 } else if (*command & FTRACE_DISABLE_MCOUNT) {
581 addr = (unsigned long)ftrace_stub;
582 ftrace_mcount_set(&addr);
588 static void ftrace_run_update_code(int command)
590 stop_machine(__ftrace_modify_code, &command, NULL);
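/*
 * stop_machine() runs __ftrace_modify_code() with every other CPU held in
 * a known state, so the kernel text can be rewritten without another CPU
 * executing the instructions that are being patched.
 */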
593 void ftrace_disable_daemon(void)
595 /* Stop the daemon from calling kstop_machine */
596 mutex_lock(&ftraced_lock);
598 mutex_unlock(&ftraced_lock);
600 ftrace_force_update();
603 void ftrace_enable_daemon(void)
605 mutex_lock(&ftraced_lock);
607 mutex_unlock(&ftraced_lock);
609 ftrace_force_update();
612 static ftrace_func_t saved_ftrace_func;
614 static void ftrace_startup(void)
618 if (unlikely(ftrace_disabled))
621 mutex_lock(&ftraced_lock);
623 if (ftraced_suspend == 1)
624 command |= FTRACE_ENABLE_CALLS;
626 if (saved_ftrace_func != ftrace_trace_function) {
627 saved_ftrace_func = ftrace_trace_function;
628 command |= FTRACE_UPDATE_TRACE_FUNC;
631 if (!command || !ftrace_enabled)
634 ftrace_run_update_code(command);
636 mutex_unlock(&ftraced_lock);
639 static void ftrace_shutdown(void)
643 if (unlikely(ftrace_disabled))
646 mutex_lock(&ftraced_lock);
648 if (!ftraced_suspend)
649 command |= FTRACE_DISABLE_CALLS;
651 if (saved_ftrace_func != ftrace_trace_function) {
652 saved_ftrace_func = ftrace_trace_function;
653 command |= FTRACE_UPDATE_TRACE_FUNC;
656 if (!command || !ftrace_enabled)
659 ftrace_run_update_code(command);
661 mutex_unlock(&ftraced_lock);
664 static void ftrace_startup_sysctl(void)
666 int command = FTRACE_ENABLE_MCOUNT;
668 if (unlikely(ftrace_disabled))
671 mutex_lock(&ftraced_lock);
672 /* Force update next time */
673 saved_ftrace_func = NULL;
674 /* ftraced_suspend is true if we want ftrace running */
676 command |= FTRACE_ENABLE_CALLS;
678 ftrace_run_update_code(command);
679 mutex_unlock(&ftraced_lock);
682 static void ftrace_shutdown_sysctl(void)
684 int command = FTRACE_DISABLE_MCOUNT;
686 if (unlikely(ftrace_disabled))
689 mutex_lock(&ftraced_lock);
690 /* ftraced_suspend is true if ftrace is running */
692 command |= FTRACE_DISABLE_CALLS;
694 ftrace_run_update_code(command);
695 mutex_unlock(&ftraced_lock);
698 static cycle_t ftrace_update_time;
699 static unsigned long ftrace_update_cnt;
700 unsigned long ftrace_update_tot_cnt;
702 static int __ftrace_update_code(void *ignore)
704 int i, save_ftrace_enabled;
706 struct dyn_ftrace *p;
707 struct hlist_node *t, *n;
708 struct hlist_head *head, temp_list;
710 /* Don't record funcs now */
711 ftrace_record_suspend++;
712 save_ftrace_enabled = ftrace_enabled;
715 start = ftrace_now(raw_smp_processor_id());
716 ftrace_update_cnt = 0;
718 /* No locks needed, the machine is stopped! */
719 for (i = 0; i < FTRACE_HASHSIZE; i++) {
720 INIT_HLIST_HEAD(&temp_list);
721 head = &ftrace_hash[i];
723 /* all CPUs are stopped, we are safe to modify code */
724 hlist_for_each_entry_safe(p, t, n, head, node) {
725 /* Skip over failed records which have not been freed.
727 if (p->flags & FTRACE_FL_FAILED)
730 /* Unconverted records are always at the head of the
731 * hash bucket. Once we encounter a converted record,
732 * simply skip over to the next bucket. Saves ftraced
733 * some processor cycles (ftrace does its bit for
734 * global warming :-p ). */
735 if (p->flags & (FTRACE_FL_CONVERTED))
738 /* Ignore updates to this record's mcount site.
739 * Reintroduce this record at the head of this
740 * bucket to attempt to "convert" it again if
741 * the kprobe on it is unregistered before the next run.
743 if (get_kprobe((void *)p->ip)) {
745 INIT_HLIST_NODE(&p->node);
746 hlist_add_head(&p->node, &temp_list);
753 /* convert record (i.e., patch the mcount call with a NOP) */
754 if (ftrace_code_disable(p)) {
755 p->flags |= FTRACE_FL_CONVERTED;
758 if ((system_state == SYSTEM_BOOTING) ||
759 !core_kernel_text(p->ip)) {
766 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
768 INIT_HLIST_NODE(&p->node);
769 hlist_add_head(&p->node, head);
773 stop = ftrace_now(raw_smp_processor_id());
774 ftrace_update_time = stop - start;
775 ftrace_update_tot_cnt += ftrace_update_cnt;
778 ftrace_enabled = save_ftrace_enabled;
779 ftrace_record_suspend--;
784 static int ftrace_update_code(void)
786 if (unlikely(ftrace_disabled) ||
787 !ftrace_enabled || !ftraced_trigger)
790 stop_machine(__ftrace_update_code, NULL, NULL);
795 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
797 struct ftrace_page *pg;
801 /* allocate a few pages */
802 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
803 if (!ftrace_pages_start)
807 * Allocate a few more pages.
809 * TODO: have some parser search vmlinux before
810 * final linking to find all calls to ftrace.
812 * a) know how many pages to allocate.
814 * b) set up the table then.
816 * The dynamic code is still necessary for modules.
820 pg = ftrace_pages = ftrace_pages_start;
822 cnt = num_to_init / ENTRIES_PER_PAGE;
823 pr_info("ftrace: allocating %ld hash entries in %d pages\n",
826 for (i = 0; i < cnt; i++) {
827 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
829 /* If we fail, we'll try later anyway */
840 FTRACE_ITER_FILTER = (1 << 0),
841 FTRACE_ITER_CONT = (1 << 1),
842 FTRACE_ITER_NOTRACE = (1 << 2),
843 FTRACE_ITER_FAILURES = (1 << 3),
846 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
848 struct ftrace_iterator {
850 struct ftrace_page *pg;
853 unsigned char buffer[FTRACE_BUFF_MAX+1];
859 t_next(struct seq_file *m, void *v, loff_t *pos)
861 struct ftrace_iterator *iter = m->private;
862 struct dyn_ftrace *rec = NULL;
867 if (iter->idx >= iter->pg->index) {
868 if (iter->pg->next) {
869 iter->pg = iter->pg->next;
874 rec = &iter->pg->records[iter->idx++];
875 if ((rec->flags & FTRACE_FL_FREE) ||
877 (!(iter->flags & FTRACE_ITER_FAILURES) &&
878 (rec->flags & FTRACE_FL_FAILED)) ||
880 ((iter->flags & FTRACE_ITER_FAILURES) &&
881 !(rec->flags & FTRACE_FL_FAILED)) ||
883 ((iter->flags & FTRACE_ITER_NOTRACE) &&
884 !(rec->flags & FTRACE_FL_NOTRACE))) {
895 static void *t_start(struct seq_file *m, loff_t *pos)
897 struct ftrace_iterator *iter = m->private;
901 if (*pos != iter->pos) {
902 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
906 p = t_next(m, p, &l);
912 static void t_stop(struct seq_file *m, void *p)
916 static int t_show(struct seq_file *m, void *v)
918 struct dyn_ftrace *rec = v;
919 char str[KSYM_SYMBOL_LEN];
924 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
926 seq_printf(m, "%s\n", str);
931 static struct seq_operations show_ftrace_seq_ops = {
939 ftrace_avail_open(struct inode *inode, struct file *file)
941 struct ftrace_iterator *iter;
944 if (unlikely(ftrace_disabled))
947 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
951 iter->pg = ftrace_pages_start;
954 ret = seq_open(file, &show_ftrace_seq_ops);
956 struct seq_file *m = file->private_data;
966 int ftrace_avail_release(struct inode *inode, struct file *file)
968 struct seq_file *m = (struct seq_file *)file->private_data;
969 struct ftrace_iterator *iter = m->private;
971 seq_release(inode, file);
978 ftrace_failures_open(struct inode *inode, struct file *file)
982 struct ftrace_iterator *iter;
984 ret = ftrace_avail_open(inode, file);
986 m = (struct seq_file *)file->private_data;
987 iter = (struct ftrace_iterator *)m->private;
988 iter->flags = FTRACE_ITER_FAILURES;
995 static void ftrace_filter_reset(int enable)
997 struct ftrace_page *pg;
998 struct dyn_ftrace *rec;
999 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1002 /* keep kstop_machine from running */
1005 ftrace_filtered = 0;
1006 pg = ftrace_pages_start;
1008 for (i = 0; i < pg->index; i++) {
1009 rec = &pg->records[i];
1010 if (rec->flags & FTRACE_FL_FAILED)
1012 rec->flags &= ~type;
1020 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1022 struct ftrace_iterator *iter;
1025 if (unlikely(ftrace_disabled))
1028 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1032 mutex_lock(&ftrace_regex_lock);
1033 if ((file->f_mode & FMODE_WRITE) &&
1034 !(file->f_flags & O_APPEND))
1035 ftrace_filter_reset(enable);
1037 if (file->f_mode & FMODE_READ) {
1038 iter->pg = ftrace_pages_start;
1040 iter->flags = enable ? FTRACE_ITER_FILTER :
1041 FTRACE_ITER_NOTRACE;
1043 ret = seq_open(file, &show_ftrace_seq_ops);
1045 struct seq_file *m = file->private_data;
1050 file->private_data = iter;
1051 mutex_unlock(&ftrace_regex_lock);
1057 ftrace_filter_open(struct inode *inode, struct file *file)
1059 return ftrace_regex_open(inode, file, 1);
1063 ftrace_notrace_open(struct inode *inode, struct file *file)
1065 return ftrace_regex_open(inode, file, 0);
1069 ftrace_regex_read(struct file *file, char __user *ubuf,
1070 size_t cnt, loff_t *ppos)
1072 if (file->f_mode & FMODE_READ)
1073 return seq_read(file, ubuf, cnt, ppos);
1079 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1083 if (file->f_mode & FMODE_READ)
1084 ret = seq_lseek(file, offset, origin);
1086 file->f_pos = ret = 1;
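/*
 * Glob handling in ftrace_match() below (illustrative examples, as written
 * to set_ftrace_filter): a bare name like "schedule" must match in full,
 * "sched*" matches symbols starting with "sched" (MATCH_FRONT_ONLY),
 * "*lock" matches symbols ending in "lock" (MATCH_END_ONLY), and "*spin*"
 * matches symbols containing "spin" (MATCH_MIDDLE_ONLY).
 */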
1099 ftrace_match(unsigned char *buff, int len, int enable)
1101 char str[KSYM_SYMBOL_LEN];
1102 char *search = NULL;
1103 struct ftrace_page *pg;
1104 struct dyn_ftrace *rec;
1105 int type = MATCH_FULL;
1106 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1107 unsigned i, match = 0, search_len = 0;
1109 for (i = 0; i < len; i++) {
1110 if (buff[i] == '*') {
1112 search = buff + i + 1;
1113 type = MATCH_END_ONLY;
1114 search_len = len - (i + 1);
1116 if (type == MATCH_END_ONLY) {
1117 type = MATCH_MIDDLE_ONLY;
1120 type = MATCH_FRONT_ONLY;
1128 /* keep kstop_machine from running */
1131 ftrace_filtered = 1;
1132 pg = ftrace_pages_start;
1134 for (i = 0; i < pg->index; i++) {
1138 rec = &pg->records[i];
1139 if (rec->flags & FTRACE_FL_FAILED)
1141 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1144 if (strcmp(str, buff) == 0)
1147 case MATCH_FRONT_ONLY:
1148 if (memcmp(str, buff, match) == 0)
1151 case MATCH_MIDDLE_ONLY:
1152 if (strstr(str, search))
1155 case MATCH_END_ONLY:
1156 ptr = strstr(str, search);
1157 if (ptr && (ptr[search_len] == 0))
1170 ftrace_regex_write(struct file *file, const char __user *ubuf,
1171 size_t cnt, loff_t *ppos, int enable)
1173 struct ftrace_iterator *iter;
1178 if (!cnt || cnt < 0)
1181 mutex_lock(&ftrace_regex_lock);
1183 if (file->f_mode & FMODE_READ) {
1184 struct seq_file *m = file->private_data;
1187 iter = file->private_data;
1190 iter->flags &= ~FTRACE_ITER_CONT;
1191 iter->buffer_idx = 0;
1194 ret = get_user(ch, ubuf++);
1200 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1201 /* skip white space */
1202 while (cnt && isspace(ch)) {
1203 ret = get_user(ch, ubuf++);
1211 file->f_pos += read;
1216 iter->buffer_idx = 0;
1219 while (cnt && !isspace(ch)) {
1220 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1221 iter->buffer[iter->buffer_idx++] = ch;
1226 ret = get_user(ch, ubuf++);
1235 iter->buffer[iter->buffer_idx] = 0;
1236 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1237 iter->buffer_idx = 0;
1239 iter->flags |= FTRACE_ITER_CONT;
1242 file->f_pos += read;
1246 mutex_unlock(&ftrace_regex_lock);
1252 ftrace_filter_write(struct file *file, const char __user *ubuf,
1253 size_t cnt, loff_t *ppos)
1255 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1259 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1260 size_t cnt, loff_t *ppos)
1262 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1266 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1268 if (unlikely(ftrace_disabled))
1271 mutex_lock(&ftrace_regex_lock);
1273 ftrace_filter_reset(enable);
1275 ftrace_match(buf, len, enable);
1276 mutex_unlock(&ftrace_regex_lock);
1280 * ftrace_set_filter - set a function to filter on in ftrace
1281 * @buf - the string that holds the function filter text.
1282 * @len - the length of the string.
1283 * @reset - non zero to reset all filters before applying this filter.
1285 * Filters denote which functions should be enabled when tracing is enabled.
1286 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1288 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1290 ftrace_set_regex(buf, len, reset, 1);
1294 * ftrace_set_notrace - set a function to not trace in ftrace
1295 * @buf - the string that holds the function notrace text.
1296 * @len - the length of the string.
1297 * @reset - non zero to reset all filters before applying this filter.
1299 * Notrace Filters denote which functions should not be enabled when tracing
1300 * is enabled. If @buf is NULL and reset is set, all functions will be enabled for tracing.
1303 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1305 ftrace_set_regex(buf, len, reset, 0);
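/*
 * Usage sketch (illustrative, for an in-kernel caller such as a tracer;
 * the calls below are examples, not taken from this file):
 *
 *	ftrace_set_filter("schedule", strlen("schedule"), 1);
 *		- reset all filters, then trace only schedule()
 *	ftrace_set_notrace(NULL, 0, 1);
 *		- clear the notrace list so nothing is excluded
 */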
1309 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1311 struct seq_file *m = (struct seq_file *)file->private_data;
1312 struct ftrace_iterator *iter;
1314 mutex_lock(&ftrace_regex_lock);
1315 if (file->f_mode & FMODE_READ) {
1318 seq_release(inode, file);
1320 iter = file->private_data;
1322 if (iter->buffer_idx) {
1324 iter->buffer[iter->buffer_idx] = 0;
1325 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1328 mutex_lock(&ftrace_sysctl_lock);
1329 mutex_lock(&ftraced_lock);
1330 if (iter->filtered && ftraced_suspend && ftrace_enabled)
1331 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1332 mutex_unlock(&ftraced_lock);
1333 mutex_unlock(&ftrace_sysctl_lock);
1336 mutex_unlock(&ftrace_regex_lock);
1341 ftrace_filter_release(struct inode *inode, struct file *file)
1343 return ftrace_regex_release(inode, file, 1);
1347 ftrace_notrace_release(struct inode *inode, struct file *file)
1349 return ftrace_regex_release(inode, file, 0);
1353 ftraced_read(struct file *filp, char __user *ubuf,
1354 size_t cnt, loff_t *ppos)
1356 /* don't worry about races */
1357 char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1358 int r = strlen(buf);
1360 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1364 ftraced_write(struct file *filp, const char __user *ubuf,
1365 size_t cnt, loff_t *ppos)
1371 if (cnt >= sizeof(buf))
1374 if (copy_from_user(&buf, ubuf, cnt))
1377 if (strncmp(buf, "enable", 6) == 0)
1379 else if (strncmp(buf, "disable", 7) == 0)
1384 ret = strict_strtoul(buf, 10, &val);
1392 ftrace_enable_daemon();
1394 ftrace_disable_daemon();
1401 static struct file_operations ftrace_avail_fops = {
1402 .open = ftrace_avail_open,
1404 .llseek = seq_lseek,
1405 .release = ftrace_avail_release,
1408 static struct file_operations ftrace_failures_fops = {
1409 .open = ftrace_failures_open,
1411 .llseek = seq_lseek,
1412 .release = ftrace_avail_release,
1415 static struct file_operations ftrace_filter_fops = {
1416 .open = ftrace_filter_open,
1417 .read = ftrace_regex_read,
1418 .write = ftrace_filter_write,
1419 .llseek = ftrace_regex_lseek,
1420 .release = ftrace_filter_release,
1423 static struct file_operations ftrace_notrace_fops = {
1424 .open = ftrace_notrace_open,
1425 .read = ftrace_regex_read,
1426 .write = ftrace_notrace_write,
1427 .llseek = ftrace_regex_lseek,
1428 .release = ftrace_notrace_release,
1431 static struct file_operations ftraced_fops = {
1432 .open = tracing_open_generic,
1433 .read = ftraced_read,
1434 .write = ftraced_write,
1438 * ftrace_force_update - force an update to all recording ftrace functions
1440 int ftrace_force_update(void)
1444 if (unlikely(ftrace_disabled))
1447 mutex_lock(&ftrace_sysctl_lock);
1448 mutex_lock(&ftraced_lock);
1451 * If ftraced_trigger is not set, then there is nothing to update.
1454 if (ftraced_trigger && !ftrace_update_code())
1457 mutex_unlock(&ftraced_lock);
1458 mutex_unlock(&ftrace_sysctl_lock);
1463 static void ftrace_force_shutdown(void)
1465 struct task_struct *task;
1466 int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1468 mutex_lock(&ftraced_lock);
1469 task = ftraced_task;
1470 ftraced_task = NULL;
1471 ftraced_suspend = -1;
1472 ftrace_run_update_code(command);
1473 mutex_unlock(&ftraced_lock);
1479 static __init int ftrace_init_debugfs(void)
1481 struct dentry *d_tracer;
1482 struct dentry *entry;
1484 d_tracer = tracing_init_dentry();
1486 entry = debugfs_create_file("available_filter_functions", 0444,
1487 d_tracer, NULL, &ftrace_avail_fops);
1489 pr_warning("Could not create debugfs "
1490 "'available_filter_functions' entry\n");
1492 entry = debugfs_create_file("failures", 0444,
1493 d_tracer, NULL, &ftrace_failures_fops);
1495 pr_warning("Could not create debugfs 'failures' entry\n");
1497 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1498 NULL, &ftrace_filter_fops);
1500 pr_warning("Could not create debugfs "
1501 "'set_ftrace_filter' entry\n");
1503 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1504 NULL, &ftrace_notrace_fops);
1506 pr_warning("Could not create debugfs "
1507 "'set_ftrace_notrace' entry\n");
1509 entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1510 NULL, &ftraced_fops);
1512 pr_warning("Could not create debugfs "
1513 "'ftraced_enabled' entry\n");
1517 fs_initcall(ftrace_init_debugfs);
1519 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
1520 static int ftrace_convert_nops(unsigned long *start,
1525 unsigned long flags;
1529 addr = ftrace_call_adjust(*p++);
1530 ftrace_record_ip(addr);
1531 ftrace_shutdown_replenish();
1535 local_irq_save(flags);
1536 __ftrace_update_code(p);
1537 local_irq_restore(flags);
1542 void ftrace_init_module(unsigned long *start, unsigned long *end)
1544 ftrace_convert_nops(start, end);
1547 extern unsigned long __start_mcount_loc[];
1548 extern unsigned long __stop_mcount_loc[];
1550 void __init ftrace_init(void)
1552 unsigned long count, addr, flags;
1555 /* Keep the ftrace pointer to the stub */
1556 addr = (unsigned long)ftrace_stub;
1558 local_irq_save(flags);
1559 ftrace_dyn_arch_init(&addr);
1560 local_irq_restore(flags);
1562 /* ftrace_dyn_arch_init places the return code in addr */
1566 count = __stop_mcount_loc - __start_mcount_loc;
1568 ret = ftrace_dyn_table_alloc(count);
1572 last_ftrace_enabled = ftrace_enabled = 1;
1574 ret = ftrace_convert_nops(__start_mcount_loc,
1579 ftrace_disabled = 1;
1581 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
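/*
 * Without CONFIG_FTRACE_MCOUNT_RECORD there is no build-time list of mcount
 * call sites, so the ftraced kernel thread below wakes up once a second and,
 * when new sites have been recorded, converts them to nops via
 * ftrace_update_code() (which uses stop_machine()).
 */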
1582 static int ftraced(void *ignore)
1584 unsigned long usecs;
1586 while (!kthread_should_stop()) {
1588 set_current_state(TASK_INTERRUPTIBLE);
1590 /* check once a second */
1591 schedule_timeout(HZ);
1593 if (unlikely(ftrace_disabled))
1596 mutex_lock(&ftrace_sysctl_lock);
1597 mutex_lock(&ftraced_lock);
1598 if (!ftraced_suspend && !ftraced_stop &&
1599 ftrace_update_code()) {
1600 usecs = nsecs_to_usecs(ftrace_update_time);
1601 if (ftrace_update_tot_cnt > 100000) {
1602 ftrace_update_tot_cnt = 0;
1603 pr_info("hm, dftrace overflow: %lu change%s"
1604 " (%lu total) in %lu usec%s\n",
1606 ftrace_update_cnt != 1 ? "s" : "",
1607 ftrace_update_tot_cnt,
1608 usecs, usecs != 1 ? "s" : "");
1609 ftrace_disabled = 1;
1613 mutex_unlock(&ftraced_lock);
1614 mutex_unlock(&ftrace_sysctl_lock);
1616 ftrace_shutdown_replenish();
1618 __set_current_state(TASK_RUNNING);
1622 static int __init ftrace_dynamic_init(void)
1624 struct task_struct *p;
1628 addr = (unsigned long)ftrace_record_ip;
1630 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1632 /* ftrace_dyn_arch_init places the return code in addr */
1638 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1642 p = kthread_run(ftraced, NULL, "ftraced");
1648 last_ftrace_enabled = ftrace_enabled = 1;
1654 ftrace_disabled = 1;
1658 core_initcall(ftrace_dynamic_init);
1659 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1662 # define ftrace_startup() do { } while (0)
1663 # define ftrace_shutdown() do { } while (0)
1664 # define ftrace_startup_sysctl() do { } while (0)
1665 # define ftrace_shutdown_sysctl() do { } while (0)
1666 # define ftrace_force_shutdown() do { } while (0)
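/*
 * Without CONFIG_DYNAMIC_FTRACE no call sites are patched: every mcount
 * call stays in place and tracing is switched purely by changing
 * ftrace_trace_function, so the start/stop helpers above collapse to no-ops.
 */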
1667 #endif /* CONFIG_DYNAMIC_FTRACE */
1670 * ftrace_kill_atomic - kill ftrace from critical sections
1672 * This function should be used by panic code. It stops ftrace
1673 * but in a not so nice way. If you need to simply kill ftrace
1674 * from a non-atomic section, use ftrace_kill.
1676 void ftrace_kill_atomic(void)
1678 ftrace_disabled = 1;
1680 #ifdef CONFIG_DYNAMIC_FTRACE
1681 ftraced_suspend = -1;
1683 clear_ftrace_function();
1687 * ftrace_kill - totally shutdown ftrace
1689 * This is a safety measure. If something was detected that seems
1690 * wrong, calling this function will keep ftrace from doing
1691 * any more modifications or updates.
1692 * Use it when something has gone wrong.
1694 void ftrace_kill(void)
1696 mutex_lock(&ftrace_sysctl_lock);
1697 ftrace_disabled = 1;
1700 clear_ftrace_function();
1701 mutex_unlock(&ftrace_sysctl_lock);
1703 /* Try to totally disable ftrace */
1704 ftrace_force_shutdown();
1708 * register_ftrace_function - register a function for profiling
1709 * @ops - ops structure that holds the function for profiling.
1711 * Register a function to be called by all functions in the kernel.
1714 * Note: @ops->func and all the functions it calls must be labeled
1715 * with "notrace", otherwise it will go into a recursive loop.
1718 int register_ftrace_function(struct ftrace_ops *ops)
1722 if (unlikely(ftrace_disabled))
1725 mutex_lock(&ftrace_sysctl_lock);
1726 ret = __register_ftrace_function(ops);
1728 mutex_unlock(&ftrace_sysctl_lock);
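/*
 * Registration sketch (illustrative; the names below are made up for the
 * example and are not part of this file):
 *
 *	static void notrace my_trace(unsigned long ip, unsigned long parent_ip)
 *	{
 *		... record ip and parent_ip; must not call traceable code ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */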
1734 * unregister_ftrace_function - unregister a function for profiling.
1735 * @ops - ops structure that holds the function to unregister
1737 * Unregister a function that was added to be called by ftrace profiling.
1739 int unregister_ftrace_function(struct ftrace_ops *ops)
1743 mutex_lock(&ftrace_sysctl_lock);
1744 ret = __unregister_ftrace_function(ops);
1746 mutex_unlock(&ftrace_sysctl_lock);
1752 ftrace_enable_sysctl(struct ctl_table *table, int write,
1753 struct file *file, void __user *buffer, size_t *lenp,
1758 if (unlikely(ftrace_disabled))
1761 mutex_lock(&ftrace_sysctl_lock);
1763 ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
1765 if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1768 last_ftrace_enabled = ftrace_enabled;
1770 if (ftrace_enabled) {
1772 ftrace_startup_sysctl();
1774 /* we are starting ftrace again */
1775 if (ftrace_list != &ftrace_list_end) {
1776 if (ftrace_list->next == &ftrace_list_end)
1777 ftrace_trace_function = ftrace_list->func;
1779 ftrace_trace_function = ftrace_list_func;
1783 /* stopping ftrace calls (just send to ftrace_stub) */
1784 ftrace_trace_function = ftrace_stub;
1786 ftrace_shutdown_sysctl();
1790 mutex_unlock(&ftrace_sysctl_lock);