/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs stop calling
 * the previous trace function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;

	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
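/*
 * The smp_wmb() above pairs with the read_barrier_depends() calls in
 * ftrace_list_func(): a CPU walking ftrace_list is guaranteed to see a
 * valid ops->next before it can see the new ops at the head of the list.
 */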
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
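/*
 * These flags form a bitmask that is handed to ftrace_run_update_code();
 * they may be or'd together, e.g. ftrace_force_shutdown() below passes
 * FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC in a single call.
 */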
static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000
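/*
 * Rough sizing (illustrative only, the real numbers depend on the
 * architecture and config): with 4 KiB pages, a ~16 byte struct
 * ftrace_page header and a ~32 byte struct dyn_ftrace on 64-bit,
 * ENTRIES_PER_PAGE comes to about 127 records, so the NR_TO_INIT
 * estimate of 10000 records needs on the order of 80 pages.
 */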
static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;
static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
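/*
 * Freed records are chained through their ip field: ftrace_free_rec()
 * stores the old free-list head in rec->ip and makes rec the new head,
 * and ftrace_alloc_dyn_node() pops a record back off the list by
 * reading the next pointer out of rec->ip.
 */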
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}
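/*
 * The filter cases handled above, in table form:
 *
 *	FILTER set,   ENABLED set    -> leave the call site alone
 *	FILTER set,   ENABLED clear  -> enable the call site
 *	FILTER clear, ENABLED clear  -> leave the call site alone
 *	FILTER clear, ENABLED set    -> disable the call site
 */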
static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}
static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
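/*
 * All code patching funnels through stop_machine_run(): every other CPU
 * is parked while __ftrace_modify_code() (and __ftrace_update_code()
 * below) rewrite the mcount call sites, so no CPU can be executing the
 * instructions that are being changed.
 */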
static ftrace_func_t saved_ftrace_func;
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else
		kfree(iter);

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}
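/*
 * Filter pattern examples (illustrative): "sys_read" selects a single
 * function (MATCH_FULL), "sys_*" everything starting with "sys_"
 * (MATCH_FRONT_ONLY), "*_read" everything ending in "_read"
 * (MATCH_END_ONLY) and "*lock*" everything containing "lock"
 * (MATCH_MIDDLE_ONLY).
 */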
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}
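/*
 * Example (illustrative): from user space this write handler is driven
 * through the debugfs file created below, e.g. with debugfs mounted at
 * /sys/kernel/debug:
 *
 *	echo 'sys_open sys_close' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * Each whitespace-separated token is handed to ftrace_match(); opening
 * the file for writing without O_APPEND resets the filter first.
 */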
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}
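/*
 * Example (illustrative): a tracer that only cares about the wakeup
 * path could restrict tracing before registering its callback:
 *
 *	static unsigned char buf[] = "try_to_wake_up";
 *
 *	ftrace_set_filter(buf, sizeof(buf) - 1, 1);
 *
 * The buffer must be writable because ftrace_match() terminates the
 * string at any '*' wildcard; reset == 1 clears previous filters first.
 */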
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
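/*
 * Example (illustrative): a caller that cannot wait for the once-a-second
 * daemon (the tracer selftests are the typical case) forces the update
 * before enabling its hook, roughly:
 *
 *	ret = ftrace_force_update();
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 *
 * where my_ops is that caller's own ftrace_ops.
 */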
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more code modifications and updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
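/*
 * Example (illustrative, not part of this file): a minimal tracer hooks
 * every function entry by registering an ftrace_ops whose callback is
 * marked notrace so it is not itself traced:
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * my_trace_func, my_ops and my_hit_count are made up for the example;
 * ip is the address of the traced function and parent_ip its caller.
 */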
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
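/*
 * Example (illustrative): this handler backs the ftrace_enabled sysctl,
 * whose table entry lives in kernel/sysctl.c, so tracing can be toggled
 * at run time with e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled
 *	echo 1 > /proc/sys/kernel/ftrace_enabled
 */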