/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}
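
/*
 * Illustrative sketch (not part of the original file): every callback
 * hung off ftrace_list must match ftrace_func_t, and it, plus anything
 * it calls, must be marked notrace, or the tracer itself would be
 * traced and recurse.  A hypothetical minimal callback:
 *
 *        static void notrace my_trace_func(unsigned long ip,
 *                                          unsigned long parent_ip)
 *        {
 *                atomic_long_inc(&my_hit_count);
 *        }
 *
 * my_trace_func and my_hit_count are made-up names; ip is the address
 * of the traced function and parent_ip the address of its caller.
 */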

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be a lag before all CPUs stop
 * calling into the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* should not be called from interrupt context */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
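
/*
 * Sketch of the ordering contract above (for reference, not new code):
 * the smp_wmb() pairs with the read_barrier_depends() calls in
 * ftrace_list_func().  The writer initializes before publishing:
 *
 *        ops->next = ftrace_list;        (initialize the new node)
 *        smp_wmb();                      (order the init before...)
 *        ftrace_list = ops;              (...making it reachable)
 *
 * so a reader that sees the new ftrace_list value is guaranteed to see
 * a valid ops->next as well, even on weakly ordered machines.
 */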

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}
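
/*
 * Note on the removal loop above (explanatory sketch): iterating with a
 * struct ftrace_ops **p keeps *p pointing at the link that references
 * the current node, so "*p = (*p)->next" unlinks ops with no special
 * case for the list head.  The equivalent single-pointer walk would
 * need an explicit prev node, roughly (hypothetical code):
 *
 *        struct ftrace_ops *prev = NULL, *op;
 *
 *        for (op = ftrace_list; op != &ftrace_list_end; op = op->next) {
 *                if (op == ops)
 *                        break;
 *                prev = op;
 *        }
 *        if (op == ops) {
 *                if (prev)
 *                        prev->next = ops->next;
 *                else
 *                        ftrace_list = ops->next;
 *        }
 */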

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;
static int frozen_record_count;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
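
/*
 * Rough worked example (sizes are arch and config dependent, so this is
 * only an estimate): on a 64-bit box with PAGE_SIZE = 4096, a 16-byte
 * struct ftrace_page header and a dyn_ftrace record of about 32 bytes,
 * ENTRIES_PER_PAGE works out to (4096 - 16) / 32 = 127 records a page.
 */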

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page      *ftrace_pages_start;
static struct ftrace_page      *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;
static int ftraced_stop;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

#ifdef CONFIG_KPROBES
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
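
/*
 * The free list above costs no extra memory: a freed record's ip field
 * no longer holds a meaningful address, so it is reused as the "next
 * free" link.  Conceptually the push is:
 *
 *        rec->ip = (unsigned long)ftrace_free_records;  (link to old head)
 *        ftrace_free_records = rec;                     (rec is new head)
 *
 * and ftrace_alloc_dyn_node() pops the head by casting rec->ip back to
 * a pointer, using FTRACE_FL_FREE as a sanity check against corruption.
 */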

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))

static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
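
/*
 * For reference, the filtered-and-enabling branch above boils down to
 * this decision table (a sketch of the code's behavior, derived from
 * the checks on fl):
 *
 *        FILTER NOTRACE ENABLED  action
 *           0      0       0     nothing
 *           0      0       1     disable the call site
 *           0      1       0     nothing
 *           0      1       1     disable the call site
 *           1      0       0     enable the call site
 *           1      0       1     nothing (already in the desired state)
 *           1      1       0     nothing
 *           1      1       1     disable the call site (notrace wins)
 */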

static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip))
                                continue;

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

static int __ftrace_update_code(void *ignore);

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bit for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                continue;
                        }

                        /* convert record (i.e, patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}

static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}

static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t                  pos;
        struct ftrace_page      *pg;
        unsigned                idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~type;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
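
/*
 * How a filter string maps onto these match types (examples):
 *
 *        "foo"         MATCH_FULL              exactly the symbol "foo"
 *        "foo*"        MATCH_FRONT_ONLY        symbols starting with "foo"
 *        "*foo"        MATCH_END_ONLY          symbols ending in "foo"
 *        "*foo*"       MATCH_MIDDLE_ONLY       symbols containing "foo"
 */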

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        if (enable)
                ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= flag;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt)
                return 0;

        mutex_lock(&ftrace_regex_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(enable);
        if (buf)
                ftrace_match(buf, len, enable);
        mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 1);
}
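
/*
 * Example call from a tracer (illustrative only):
 *
 *        char buf[] = "sched_*";
 *
 *        ftrace_set_filter(buf, strlen(buf), 1);
 *
 * This resets any existing filter and then restricts tracing to
 * functions whose names start with "sched_".  Note that the matcher
 * may write into the buffer (it truncates at the '*'), so the string
 * must not live in read-only memory.
 */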

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
        ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx, enable);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_regex_lock);
        return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
        return ftrace_regex_release(inode, file, 0);
}

static ssize_t
ftraced_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        /* don't worry about races */
        char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
        int r = strlen(buf);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftraced_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        char buf[64];
        long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        if (strncmp(buf, "enable", 6) == 0)
                val = 1;
        else if (strncmp(buf, "disable", 7) == 0)
                val = 0;
        else {
                buf[cnt] = 0;

                ret = strict_strtoul(buf, 10, &val);
                if (ret < 0)
                        return ret;

                val = !!val;
        }

        if (val)
                ftrace_enable_daemon();
        else
                ftrace_disable_daemon();

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
        .open = ftrace_failures_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_regex_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = ftrace_regex_read,
        .write = ftrace_notrace_write,
        .llseek = ftrace_regex_lseek,
        .release = ftrace_notrace_release,
};

static struct file_operations ftraced_fops = {
        .open = tracing_open_generic,
        .read = ftraced_read,
        .write = ftraced_write,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 */
int ftrace_force_update(void)
{
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);

        /*
         * If ftraced_trigger is not set, then there is nothing
         * to update.
         */
        if (ftraced_trigger && !ftrace_update_code())
                ret = -EBUSY;

        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("failures", 0444,
                                    d_tracer, NULL, &ftrace_failures_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'failures' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");

        entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
                                    NULL, &ftrace_notrace_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_notrace' entry\n");

        entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
                                    NULL, &ftraced_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'ftraced_enabled' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);
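
/*
 * With debugfs mounted, the files created above are driven from user
 * space, e.g. (assuming the common /sys/kernel/debug mount point):
 *
 *        cat /sys/kernel/debug/tracing/available_filter_functions
 *        echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *        echo disable > /sys/kernel/debug/tracing/ftraced_enabled
 */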

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications or updates.
 * It is used when something has gone wrong.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}
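
/*
 * End-to-end usage sketch (hypothetical caller, not part of this file):
 *
 *        static unsigned long my_hits;
 *
 *        static void notrace my_func(unsigned long ip, unsigned long pip)
 *        {
 *                my_hits++;
 *        }
 *
 *        static struct ftrace_ops my_ops __read_mostly =
 *        {
 *                .func = my_func,
 *        };
 *
 *        ...
 *        register_ftrace_function(&my_ops);
 *        ...
 *        unregister_ftrace_function(&my_ops);
 *
 * my_func, my_ops and my_hits are made-up names; the requirements are
 * the notrace marking and that my_ops stays live while registered.
 */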

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}