/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* pairs with the smp_wmb() in __register_ftrace_function() */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the last
 * invocation of the old function on another CPU.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);
        return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;
        if (*p != ops) {
                ret = -1;
                goto out;
        }
        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);
        return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The hash lock is only needed when the recording of the mcount
 * callers is dynamic, that is, done by the callers themselves and
 * not collected at compile time.
 */
static DEFINE_SPINLOCK(ftrace_hash_lock);
#define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
#define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags)
#else
/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
#define ftrace_hash_unlock(flags) do { } while (0)
#endif
static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};
static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
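
/*
 * Back-of-the-envelope capacity (illustrative only; the real sizes are
 * arch and config dependent): assuming PAGE_SIZE == 4096, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace, the macro
 * above yields (4096 - 16) / 32 = 127 records per page.
 */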
/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)     ({ 0; })
# define unfreeze_record(rec)   ({ 0; })
# define record_frozen(rec)     ({ 0; })
#endif /* CONFIG_KPROBES */
int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip != ip)
                        continue;
                if (record_frozen(rec)) {
                        if (rec->flags & FTRACE_FL_FAILED)
                                break;
                        if (!(rec->flags & FTRACE_FL_CONVERTED))
                                break;
                        if (!tracing_on || !ftrace_enabled)
                                return 1;
                        if (ftrace_filtered) {
                                fl = rec->flags & (FTRACE_FL_FILTER |
                                                   FTRACE_FL_NOTRACE);
                                if (!fl || (fl & FTRACE_FL_NOTRACE))
                                        return 1;
                        }
                }
                break;
        }

        return 0;
}
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip)
                        return 1;
        }
        return 0;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}
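
/*
 * Freed records are kept on a simple singly linked free list threaded
 * through the records themselves: ftrace_free_rec() below overloads
 * rec->ip to hold the pointer to the next free record and marks the
 * record FTRACE_FL_FREE so a stale entry is never mistaken for a live
 * ip. ftrace_alloc_dyn_node() pops from this list before carving a
 * fresh record out of the current ftrace_page.
 */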
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;
        int i;

        if (ftrace_disabled || !start)
                return;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        if ((rec->ip >= s) && (rec->ip < e))
                                ftrace_free_rec(rec);
                }
        }
        spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        ftrace_hash_lock(flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;
        ftrace_add_hash(node, key);
        ftraced_trigger = 1;

 out_unlock:
        ftrace_hash_unlock(flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
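
/*
 * Recording path in brief: the arch mcount stub funnels every traced
 * function into ftrace_record_ip() above. The per-cpu
 * ftrace_shutdown_disable_cpu counter guards against recursion (much
 * of what this function calls is itself compiled with mcount), the
 * hash keyed by hash_long(ip, FTRACE_HASHBITS) makes sure each call
 * site is recorded only once, and ftraced_trigger tells the update
 * code that new entries are waiting to be converted into nops.
 */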
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {
                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}
static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned char *nop, *call;
        int failed;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}
static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}
static void ftrace_run_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}
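
/*
 * stop_machine() runs __ftrace_modify_code() with all other CPUs
 * corralled and interrupts disabled, so no processor can be executing
 * in (or be interrupted while inside) the text being patched. That is
 * what makes rewriting the mcount call sites safe here.
 */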
void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}
static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /*
                         * Skip over failed records which have
                         * not been freed.
                         */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /*
                         * Unconverted records are always at the head of
                         * the hash bucket. Once we encounter a converted
                         * record, simply skip over to the next bucket.
                         * Saves ftraced some processor cycles (ftrace
                         * does its bit for global warming :-p ).
                         */
                        if (p->flags & FTRACE_FL_CONVERTED)
                                break;

                        /*
                         * Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run.
                         */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e, patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else if ((system_state == SYSTEM_BOOTING) ||
                                   !core_kernel_text(p->ip)) {
                                ftrace_del_hash(p);
                                ftrace_free_rec(p);
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}
static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine(__ftrace_update_code, NULL, NULL);

        return 1;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */
        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld hash entries in %d pages\n",
                num_to_init, cnt);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }
        spin_unlock(&ftrace_lock);

        iter->pos = *pos;

        return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}
static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}
static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}
enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        spin_unlock(&ftrace_lock);
}
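
/*
 * Accepted glob forms, as parsed above (examples):
 *
 *      "schedule"   - MATCH_FULL:        name must match exactly
 *      "sys_*"      - MATCH_FRONT_ONLY:  names starting with "sys_"
 *      "*lock"      - MATCH_END_ONLY:    names ending with "lock"
 *      "*spin*"     - MATCH_MIDDLE_ONLY: names containing "spin"
 *
 * Note that the buffer is modified in place (the '*' is replaced with
 * a NUL), so callers must pass writable storage.
 */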
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
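
/*
 * Example call (sketch): trace only scheduler-related functions.
 * Since ftrace_match() writes into the buffer, it must not be a
 * string literal:
 *
 *      char buf[] = "sched_*";
 *      ftrace_set_filter(buf, strlen(buf), 1);
 *
 * The reset argument of 1 clears any previously set filters first.
 */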
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}
static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
             size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
              size_t cnt, loff_t *ppos)
{
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}
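
/*
 * From user space (assuming debugfs is mounted at /sys/kernel/debug),
 * the daemon can be toggled with either the keywords or a number:
 *
 *      echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 *      echo 1 > /sys/kernel/debug/tracing/ftraced_enabled
 */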
static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");

        return 0;
}

fs_initcall(ftrace_init_debugfs);
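
/*
 * With debugfs mounted (the mount point is up to the administrator;
 * /sys/kernel/debug is common), the files created above appear under
 * the tracing directory:
 *
 *      available_filter_functions  (0444)  all traceable functions
 *      failures                    (0444)  records that failed to convert
 *      set_ftrace_filter           (0644)  trace only matching functions
 *      set_ftrace_notrace          (0644)  never trace matching functions
 *      ftraced_enabled             (0644)  enable/disable the ftraced daemon
 */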
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
static int ftrace_convert_nops(unsigned long *start,
                               unsigned long *end)
{
        unsigned long *p;
        unsigned long addr;
        unsigned long flags;

        p = start;
        while (p < end) {
                addr = ftrace_call_adjust(*p++);
                /* should not be called from interrupt context */
                spin_lock(&ftrace_lock);
                ftrace_record_ip(addr);
                spin_unlock(&ftrace_lock);
                ftrace_shutdown_replenish();
        }

        /* p is ignored */
        local_irq_save(flags);
        __ftrace_update_code(p);
        local_irq_restore(flags);

        return 0;
}
void ftrace_init_module(unsigned long *start, unsigned long *end)
{
        if (ftrace_disabled || start == end)
                return;
        ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
        unsigned long count, addr, flags;
        int ret;

        /* Keep the ftrace pointer to the stub */
        addr = (unsigned long)ftrace_stub;

        local_irq_save(flags);
        ftrace_dyn_arch_init(&addr);
        local_irq_restore(flags);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr)
                goto failed;

        count = __stop_mcount_loc - __start_mcount_loc;

        ret = ftrace_dyn_table_alloc(count);
        if (ret)
                goto failed;

        last_ftrace_enabled = ftrace_enabled = 1;

        ret = ftrace_convert_nops(__start_mcount_loc,
                                  __stop_mcount_loc);

        return;
 failed:
        ftrace_disabled = 1;
}
#else /* CONFIG_FTRACE_MCOUNT_RECORD */
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                /* reset only after reporting the total */
                                ftrace_update_tot_cnt = 0;
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine(ftrace_dyn_arch_init, &addr, NULL);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc(NR_TO_INIT);
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
#else /* !CONFIG_DYNAMIC_FTRACE */
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
        ftraced_suspend = -1;
#endif
        clear_ftrace_function();
}
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates. It is used when something
 * has gone wrong elsewhere.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
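
/*
 * Minimal usage sketch (hypothetical names, not part of this file):
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              my_record_hit(ip, parent_ip);   <-- must also be notrace
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly =
 *      {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */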
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}
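
/*
 * This handler backs the kernel.ftrace_enabled sysctl, so tracing can
 * be toggled from user space with either of:
 *
 *      sysctl kernel.ftrace_enabled=0
 *      echo 0 > /proc/sys/kernel/ftrace_enabled
 */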