ftrace: remove packed attribute on ftrace_page.
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);

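/*
 * Walk all registered ftrace_ops and call each handler in turn.
 * This is what gets installed as ftrace_trace_function when more
 * than one ops is registered.
 */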
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all callers stop
 * going through the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

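/*
 * Commands handed to __ftrace_modify_code() under stop_machine;
 * they may be OR'd together.
 */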
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

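/*
 * Records are kept in page-sized chunks allocated with
 * get_zeroed_page(): the header below is followed by as many
 * dyn_ftrace records as fit in the rest of the page
 * (ENTRIES_PER_PAGE), and the chunks are chained via ->next.
 */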
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
	struct dyn_ftrace *p;
	struct hlist_node *t;
	int found = 0;

	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
		if (p->ip == ip) {
			found = 1;
			break;
		}
	}

	return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head(&node->node, &ftrace_hash[key]);
}

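/*
 * Freed records are kept on a simple freelist: the record's ip
 * field is reused as the link to the next free record, and
 * FTRACE_FL_FREE marks the record as recyclable.
 */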
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	/* no locking, only called from kstop_machine */

	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

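/*
 * Called via mcount the first time a traced function is hit:
 * record the call site in the hash so the ftraced daemon can
 * convert it on its next pass.
 */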
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the raw version of smp_processor_id and not
	 * __get_cpu_var, which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	if (ftrace_ip_in_hash(ip, key))
		goto out;

	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		unsigned long fl;
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {
		if (enable)
			new = ftrace_call_replace(ip, FTRACE_ADDR);
		else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}
	}
}

static void ftrace_replace_code(int enable)
{
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int i;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			__ftrace_replace_code(rec, old, new, enable);
		}
	}
}

static void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}

static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

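/*
 * ftraced_suspend counts the users that want call sites patched
 * in: the 0->1 transition enables all calls and the 1->0
 * transition disables them again, both via stop_machine.
 */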
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}

static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

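/*
 * The ftraced daemon: wakes up once a second and, if new call
 * sites were recorded, converts them to nops under stop_machine,
 * then replenishes the record pages.
 */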
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

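/*
 * seq_file iterator over every dyn_ftrace record: walk the
 * ftrace_pages list, skipping failed records and, when
 * FTRACE_ITER_FILTER is set, records not marked as filtered.
 */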
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static void ftrace_filter_reset(void)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned i;

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_filter_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset();

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = FTRACE_ITER_FILTER;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

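/*
 * Parse a simple glob and mark matching records with FTRACE_FL_FILTER:
 *   "foo"	- MATCH_FULL: exact match
 *   "foo*"	- MATCH_FRONT_ONLY: prefix match
 *   "*foo"	- MATCH_END_ONLY: suffix match
 *   "*foo*"	- MATCH_MIDDLE_ONLY: substring match
 */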
static void
ftrace_match(unsigned char *buff, int len)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= FTRACE_FL_FILTER;
		}
		pg = pg->next;
	}
	preempt_enable();
}

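/*
 * Parse whitespace-separated filter patterns from user space.
 * A pattern split across two writes is carried over in
 * iter->buffer via the FTRACE_ITER_CONT flag.
 */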
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_filter_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_filter_lock);

	return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_filter_lock);
	if (reset)
		ftrace_filter_reset();
	if (buf)
		ftrace_match(buf, len);
	mutex_unlock(&ftrace_filter_lock);
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_filter_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_filter_lock);
	return 0;
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_filter_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	last_counter = ftraced_iteration_counter;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		ret = -ENODEV;
		goto out;
	}

	do {
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}

static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	if (task)
		kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");
	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
# define ftrace_force_shutdown()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If an anomaly is detected, calling
 * this function keeps ftrace from doing any more code
 * modifications or updates. It is used when something has gone
 * wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}