Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394...
[linux-2.6] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
30
31 #include <asm/ftrace.h>
32
33 #include "trace.h"
34
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;	/* previous sysctl value — consumed outside this chunk */

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

/* protects ftrace_list and ftrace_trace_function updates */
static DEFINE_SPINLOCK(ftrace_lock);
/* serializes sysctl-driven enable/disable against the daemon */
static DEFINE_MUTEX(ftrace_sysctl_lock);

/* sentinel terminating ftrace_list; its func is the no-op stub */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

/* singly linked list of registered ftrace_ops, walked by ftrace_list_func() */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* the function invoked from mcount; ftrace_stub when tracing is off */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
55
56 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
57 {
58         struct ftrace_ops *op = ftrace_list;
59
60         /* in case someone actually ports this to alpha! */
61         read_barrier_depends();
62
63         while (op != &ftrace_list_end) {
64                 /* silly alpha */
65                 read_barrier_depends();
66                 op->func(ip, parent_ip);
67                 op = op->next;
68         };
69 }
70
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag: no synchronization is done here,
 * so other CPUs may briefly keep calling the previously
 * installed function through a stale pointer.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}
81
/*
 * Link @ops onto the head of ftrace_list and select the trace callback
 * (direct call for a single ops, list-walking chain for several).
 * Returns 0 (cannot fail).  Must not be called from interrupt context:
 * ftrace_lock is taken without disabling interrupts.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}
112
113 static int __unregister_ftrace_function(struct ftrace_ops *ops)
114 {
115         struct ftrace_ops **p;
116         int ret = 0;
117
118         spin_lock(&ftrace_lock);
119
120         /*
121          * If we are removing the last function, then simply point
122          * to the ftrace_stub.
123          */
124         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
125                 ftrace_trace_function = ftrace_stub;
126                 ftrace_list = &ftrace_list_end;
127                 goto out;
128         }
129
130         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
131                 if (*p == ops)
132                         break;
133
134         if (*p != ops) {
135                 ret = -1;
136                 goto out;
137         }
138
139         *p = (*p)->next;
140
141         if (ftrace_enabled) {
142                 /* If we only have one func left, then call that directly */
143                 if (ftrace_list == &ftrace_list_end ||
144                     ftrace_list->next == &ftrace_list_end)
145                         ftrace_trace_function = ftrace_list->func;
146         }
147
148  out:
149         spin_unlock(&ftrace_lock);
150
151         return ret;
152 }
153
#ifdef CONFIG_DYNAMIC_FTRACE

/* the ftraced daemon task — set where the daemon is spawned (not in this chunk) */
static struct task_struct *ftraced_task;

/* command bits handed to __ftrace_modify_code() under stop_machine */
enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
};

/* nonzero when a filter restricts which functions get traced */
static int ftrace_filtered;
/* nonzero while call sites are patched to call the tracer */
static int tracing_on;
/* number of records currently frozen because a kprobe owns their site */
static int frozen_record_count;

/* hash of all recorded mcount call sites, keyed by hash_long(ip) */
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

/* per-cpu recursion-guard depth for ftrace_record_ip() */
static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

/* protects hash inserts in ftrace_record_ip() */
static DEFINE_SPINLOCK(ftrace_shutdown_lock);
/* serializes the daemon against startup/shutdown and sysctl paths */
static DEFINE_MUTEX(ftraced_lock);
/* presumably guards the filter/notrace write paths — defined outside this chunk */
static DEFINE_MUTEX(ftrace_regex_lock);

/* a page-sized slab of dyn_ftrace records, chained into a list */
struct ftrace_page {
        struct ftrace_page      *next;
        unsigned long           index;  /* first unused slot in records[] */
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;    /* head of page chain */
static struct ftrace_page       *ftrace_pages;          /* page currently being filled */

static int ftraced_trigger;     /* set when new ips were recorded */
static int ftraced_suspend;     /* refcount of active tracing users */
static int ftraced_stop;        /* daemon must not run kstop_machine */

static int ftrace_record_suspend;       /* nonzero while an update pass runs */

/* freed records, chained through their ->ip field (see ftrace_free_rec()) */
static struct dyn_ftrace *ftrace_free_records;
201
#ifdef CONFIG_KPROBES
/*
 * Mark @rec as owned by a kprobe so the patching code leaves its
 * mcount site alone; frozen_record_count keeps skip_trace()'s
 * fast path cheap.
 */
static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

/* Undo freeze_record() once the kprobe has been removed. */
static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

/* Nonzero if a kprobe currently owns @rec's mcount site. */
static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
/* without kprobes nothing can ever be frozen */
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */
228
/*
 * skip_trace - should the caller stay away from the mcount site at @ip?
 *
 * Returns 1 when @ip belongs to a frozen record (one whose site a
 * kprobe owns — see ftrace_replace_code()) that ftrace would otherwise
 * be handling; 0 when the caller may proceed.  Read-only: never
 * modifies the hash or any record.
 */
int skip_trace(unsigned long ip)
{
        unsigned long fl;
        struct dyn_ftrace *rec;
        struct hlist_node *t;
        struct hlist_head *head;

        /* fast path: nothing anywhere is frozen */
        if (frozen_record_count == 0)
                return 0;

        head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
        hlist_for_each_entry_rcu(rec, t, head, node) {
                if (rec->ip == ip) {
                        if (record_frozen(rec)) {
                                /* patching already failed here: stay away */
                                if (rec->flags & FTRACE_FL_FAILED)
                                        return 1;

                                /* mcount call was never converted to a nop */
                                if (!(rec->flags & FTRACE_FL_CONVERTED))
                                        return 1;

                                /* tracing is globally off */
                                if (!tracing_on || !ftrace_enabled)
                                        return 1;

                                if (ftrace_filtered) {
                                        /* skip unless filtered in and not marked notrace */
                                        fl = rec->flags & (FTRACE_FL_FILTER |
                                                           FTRACE_FL_NOTRACE);
                                        if (!fl || (fl & FTRACE_FL_NOTRACE))
                                                return 1;
                                }
                        }
                        break;
                }
        }

        return 0;
}
265
266 static inline int
267 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
268 {
269         struct dyn_ftrace *p;
270         struct hlist_node *t;
271         int found = 0;
272
273         hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
274                 if (p->ip == ip) {
275                         found = 1;
276                         break;
277                 }
278         }
279
280         return found;
281 }
282
/* Publish a new record in the ip hash; pairs with the RCU list walks. */
static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}

/* called from kstop_machine */
static inline void ftrace_del_hash(struct dyn_ftrace *node)
{
        hlist_del(&node->node);
}
294
/*
 * Return @rec to the free list.  The free list is threaded through the
 * records themselves by reusing ->ip as the next pointer; the
 * FTRACE_FL_FREE flag lets ftrace_alloc_dyn_node() sanity-check
 * entries when reusing them.
 */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
303
/*
 * Allocate a dyn_ftrace record: reuse a freed record when available,
 * otherwise carve the next slot out of the current ftrace_page chain.
 * Returns NULL when both sources are exhausted.
 *
 * A free-list head without FTRACE_FL_FREE means the list was corrupted
 * (->ip doubles as the free-list link), so tracing is shut down hard.
 *
 * Note: @ip is currently unused here — the caller fills in ->ip itself
 * after allocation.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                /* current page full; advance if a spare was replenished */
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}
333
/*
 * Record @ip (an mcount call site) in the hash so the daemon can later
 * convert it.  Runs in the trace hot path: a per-cpu depth counter
 * guards against recursion (hashing/allocation below may itself hit
 * traced functions), and the spinlock covers only the insert.
 */
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        /* lock-free pre-check; re-checked under the lock below */
        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();       /* NOTE(review): computed but never read in this function */

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        /* publish the record, then tell ftraced there is work pending */
        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}
400
/* address patched into mcount sites when tracing a function */
#define FTRACE_ADDR ((long)(ftrace_caller))

/*
 * Enable or disable the mcount site of a single record, honoring the
 * filter/notrace flags.  @old and @new arrive preset to the nop side
 * of the patch (see ftrace_replace_code()) and are overwritten here
 * with the call side as needed.  Returns ftrace_modify_code()'s result
 * (0 on success); also updates rec->flags to reflect the new state.
 */
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip, fl;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is set not to trace then
                 * do nothing.
                 *
                 * If this record is set not to trace and
                 * it is enabled then disable it.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
                                   FTRACE_FL_ENABLED);

                /* the four "do nothing" combinations from the table above */
                if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
                    !fl || (fl == FTRACE_FL_NOTRACE))
                        return 0;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl & FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable) {
                        /*
                         * If this record is set not to trace and is
                         * not enabled, do nothing.
                         */
                        fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
                        if (fl == FTRACE_FL_NOTRACE)
                                return 0;

                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                } else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                /* skip sites already in the requested state */
                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        return ftrace_modify_code(ip, old, new);
}
483
/*
 * Walk every recorded mcount site and patch it according to @enable.
 * Called only from __ftrace_modify_code() under stop_machine, so no
 * locking is needed.  Sites owned by a kprobe are frozen and skipped;
 * sites that fail to patch are flagged FTRACE_FL_FAILED and, when they
 * lie outside core kernel text (or during boot), freed outright.
 */
static void ftrace_replace_code(int enable)
{
        int i, failed;
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;

        /* the nop instruction is the "from" side when enabling, "to" side when disabling */
        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        /* ignore updates to this record's mcount site */
                        if (get_kprobe((void *)rec->ip)) {
                                freeze_record(rec);
                                continue;
                        } else {
                                unfreeze_record(rec);
                        }

                        failed = __ftrace_replace_code(rec, old, new, enable);
                        if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
                                rec->flags |= FTRACE_FL_FAILED;
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(rec->ip)) {
                                        ftrace_del_hash(rec);
                                        ftrace_free_rec(rec);
                                }
                        }
                }
        }
}
524
525 static void ftrace_shutdown_replenish(void)
526 {
527         if (ftrace_pages->next)
528                 return;
529
530         /* allocate another page */
531         ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
532 }
533
534 static int
535 ftrace_code_disable(struct dyn_ftrace *rec)
536 {
537         unsigned long ip;
538         unsigned char *nop, *call;
539         int failed;
540
541         ip = rec->ip;
542
543         nop = ftrace_nop_replace();
544         call = ftrace_call_replace(ip, MCOUNT_ADDR);
545
546         failed = ftrace_modify_code(ip, call, nop);
547         if (failed) {
548                 rec->flags |= FTRACE_FL_FAILED;
549                 return 0;
550         }
551         return 1;
552 }
553
/* forward declaration: defined later, shares the stop_machine context */
static int __ftrace_update_code(void *ignore);

/*
 * stop_machine() callback performing all code patching.  @data is a
 * bitmask of FTRACE_* command bits.  With every other CPU spinning,
 * kernel text can be modified safely without further locking.
 */
static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS) {
                /*
                 * Update any recorded ips now that we have the
                 * machine stopped
                 */
                __ftrace_update_code(NULL);
                ftrace_replace_code(1);
                tracing_on = 1;
        } else if (*command & FTRACE_DISABLE_CALLS) {
                ftrace_replace_code(0);
                tracing_on = 0;
        }

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        /* point the mcount hook at the recorder, or at the stub */
        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}
587
/* Execute @command's patching steps with every CPU stopped. */
static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
592
/*
 * ftrace_disable_daemon - keep ftraced from patching code
 *
 * Sets ftraced_stop under ftraced_lock, then calls
 * ftrace_force_update() (defined elsewhere — presumably flushes one
 * final update pass before the daemon goes quiet).
 */
void ftrace_disable_daemon(void)
{
        /* Stop the daemon from calling kstop_machine */
        mutex_lock(&ftraced_lock);
        ftraced_stop = 1;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}

/*
 * ftrace_enable_daemon - let ftraced patch code again
 *
 * Clears ftraced_stop and forces an immediate update pass.
 */
void ftrace_enable_daemon(void)
{
        mutex_lock(&ftraced_lock);
        ftraced_stop = 0;
        mutex_unlock(&ftraced_lock);

        ftrace_force_update();
}
611
/* the trace callback last installed via FTRACE_UPDATE_TRACE_FUNC */
static ftrace_func_t saved_ftrace_func;

/*
 * Start function tracing: bump the ftraced_suspend user refcount and,
 * for the first user, patch all recorded call sites in.  Also
 * re-targets the trace callback when it changed since the last update.
 */
static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        /* nothing to patch, or patching is suppressed by the sysctl */
        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
638
/*
 * Stop function tracing: drop the ftraced_suspend user refcount and,
 * when the last user goes away, patch all call sites back to nops.
 * Mirrors ftrace_startup().
 */
static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        /* nothing to patch, or patching is suppressed by the sysctl */
        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}
663
/*
 * Called when the ftrace_enabled sysctl flips on: point the mcount
 * hook back at the recorder and, if there are active tracing users,
 * re-enable the call sites too.  Clearing saved_ftrace_func forces a
 * trace-function update on the next startup/shutdown pass.
 */
static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
681
/*
 * Called when the ftrace_enabled sysctl flips off: point the mcount
 * hook at the stub and, if tracing was active, patch the call sites
 * back out.  Mirror of ftrace_startup_sysctl().
 */
static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}
697
/* stats of the last update pass; the non-static total is consumed elsewhere */
static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

/*
 * stop_machine() worker: walk the whole ip hash and convert every
 * not-yet-converted record's mcount call into a nop.  Records whose
 * site a kprobe owns are frozen and re-queued for a later pass;
 * records that fail to patch and are not core kernel text are freed.
 * Recording and tracing are suspended for the duration.
 */
static int __ftrace_update_code(void *ignore)
{
        int i, save_ftrace_enabled;
        cycle_t start, stop;
        struct dyn_ftrace *p;
        struct hlist_node *t, *n;
        struct hlist_head *head, temp_list;

        /* Don't be recording funcs now */
        ftrace_record_suspend++;
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                INIT_HLIST_HEAD(&temp_list);
                head = &ftrace_hash[i];

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry_safe(p, t, n, head, node) {
                        /* Skip over failed records which have not been
                         * freed. */
                        if (p->flags & FTRACE_FL_FAILED)
                                continue;

                        /* Unconverted records are always at the head of the
                         * hash bucket. Once we encounter a converted record,
                         * simply skip over to the next bucket. Saves ftraced
                         * some processor cycles (ftrace does its bid for
                         * global warming :-p ). */
                        if (p->flags & (FTRACE_FL_CONVERTED))
                                break;

                        /* Ignore updates to this record's mcount site.
                         * Reintroduce this record at the head of this
                         * bucket to attempt to "convert" it again if
                         * the kprobe on it is unregistered before the
                         * next run. */
                        if (get_kprobe((void *)p->ip)) {
                                ftrace_del_hash(p);
                                INIT_HLIST_NODE(&p->node);
                                hlist_add_head(&p->node, &temp_list);
                                freeze_record(p);
                                continue;
                        } else {
                                unfreeze_record(p);
                        }

                        /* convert record (i.e, patch mcount-call with NOP) */
                        if (ftrace_code_disable(p)) {
                                p->flags |= FTRACE_FL_CONVERTED;
                                ftrace_update_cnt++;
                        } else {
                                if ((system_state == SYSTEM_BOOTING) ||
                                    !core_kernel_text(p->ip)) {
                                        ftrace_del_hash(p);
                                        ftrace_free_rec(p);
                                }
                        }
                }

                /* put the deferred (kprobe'd) records back at the bucket head */
                hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
                        hlist_del(&p->node);
                        INIT_HLIST_NODE(&p->node);
                        hlist_add_head(&p->node, head);
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;
        ftraced_trigger = 0;

        ftrace_enabled = save_ftrace_enabled;
        ftrace_record_suspend--;

        return 0;
}
783
/*
 * Convert any newly-recorded mcount sites via stop_machine.  Returns 1
 * when an update pass ran, 0 when tracing is disabled/off or nothing
 * was recorded since the last pass (ftraced_trigger clear).
 */
static int ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled) ||
            !ftrace_enabled || !ftraced_trigger)
                return 0;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);

        return 1;
}
794
/*
 * The ftraced kernel thread: wake once a second and convert any mcount
 * sites recorded since the last pass.  When the running total of
 * conversions exceeds 100000, assume something is wrong, print
 * diagnostics, and disable ftrace entirely.  Also keeps the record
 * allocator replenished with a spare page each round.
 */
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                /* lock order matches the sysctl/startup paths */
                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (!ftraced_suspend && !ftraced_stop &&
                    ftrace_update_code()) {
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                }
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}
834
835 static int __init ftrace_dyn_table_alloc(void)
836 {
837         struct ftrace_page *pg;
838         int cnt;
839         int i;
840
841         /* allocate a few pages */
842         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
843         if (!ftrace_pages_start)
844                 return -1;
845
846         /*
847          * Allocate a few more pages.
848          *
849          * TODO: have some parser search vmlinux before
850          *   final linking to find all calls to ftrace.
851          *   Then we can:
852          *    a) know how many pages to allocate.
853          *     and/or
854          *    b) set up the table then.
855          *
856          *  The dynamic code is still necessary for
857          *  modules.
858          */
859
860         pg = ftrace_pages = ftrace_pages_start;
861
862         cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
863
864         for (i = 0; i < cnt; i++) {
865                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
866
867                 /* If we fail, we'll try later anyway */
868                 if (!pg->next)
869                         break;
870
871                 pg = pg->next;
872         }
873
874         return 0;
875 }
876
/* view-selection flags for the debugfs record-listing files */
enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* per-open-file cursor for the seq_file interfaces below */
struct ftrace_iterator {
        loff_t                  pos;            /* last position handed out by t_next() */
        struct ftrace_page      *pg;            /* page of records being walked */
        unsigned                idx;            /* next record index within pg */
        unsigned                flags;          /* FTRACE_ITER_* view selection */
        unsigned char           buffer[FTRACE_BUFF_MAX+1];      /* presumably write-path input — used outside this chunk */
        unsigned                buffer_idx;
        unsigned                filtered;
};
895
/*
 * seq_file next: advance to the following dyn_ftrace record that
 * matches the iterator's view flags (failures-only, filter-only,
 * notrace-only), crossing page boundaries as needed.  Returns NULL at
 * the end of the record list.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                /* current page exhausted: advance or fall through with NULL */
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                /* reject records that do not belong in this view */
                if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     (!(rec->flags & FTRACE_FL_FAILED) ||
                      (rec->flags & FTRACE_FL_FREE))) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        /* remember where we are so t_start() can detect seeks */
        iter->pos = *pos;

        return rec;
}
934
/*
 * seq_file start: resume iteration at *pos.  If the requested position
 * differs from where t_next() last left off (e.g. after a seek), replay
 * from the beginning until *pos is reached; otherwise just continue
 * with the next record.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                /* replay: l starts at -1 so the first t_next() yields position 0 */
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}
951
/* seq_file stop: nothing to release — the iteration holds no locks */
static void t_stop(struct seq_file *m, void *p)
{
}

/* seq_file show: print the symbol name of the record's instruction pointer */
static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}
970
/* seq_file ops shared by the avail/failures/filter/notrace files */
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
977
978 static int
979 ftrace_avail_open(struct inode *inode, struct file *file)
980 {
981         struct ftrace_iterator *iter;
982         int ret;
983
984         if (unlikely(ftrace_disabled))
985                 return -ENODEV;
986
987         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
988         if (!iter)
989                 return -ENOMEM;
990
991         iter->pg = ftrace_pages_start;
992         iter->pos = -1;
993
994         ret = seq_open(file, &show_ftrace_seq_ops);
995         if (!ret) {
996                 struct seq_file *m = file->private_data;
997
998                 m->private = iter;
999         } else {
1000                 kfree(iter);
1001         }
1002
1003         return ret;
1004 }
1005
1006 int ftrace_avail_release(struct inode *inode, struct file *file)
1007 {
1008         struct seq_file *m = (struct seq_file *)file->private_data;
1009         struct ftrace_iterator *iter = m->private;
1010
1011         seq_release(inode, file);
1012         kfree(iter);
1013
1014         return 0;
1015 }
1016
1017 static int
1018 ftrace_failures_open(struct inode *inode, struct file *file)
1019 {
1020         int ret;
1021         struct seq_file *m;
1022         struct ftrace_iterator *iter;
1023
1024         ret = ftrace_avail_open(inode, file);
1025         if (!ret) {
1026                 m = (struct seq_file *)file->private_data;
1027                 iter = (struct ftrace_iterator *)m->private;
1028                 iter->flags = FTRACE_ITER_FAILURES;
1029         }
1030
1031         return ret;
1032 }
1033
1034
1035 static void ftrace_filter_reset(int enable)
1036 {
1037         struct ftrace_page *pg;
1038         struct dyn_ftrace *rec;
1039         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1040         unsigned i;
1041
1042         /* keep kstop machine from running */
1043         preempt_disable();
1044         if (enable)
1045                 ftrace_filtered = 0;
1046         pg = ftrace_pages_start;
1047         while (pg) {
1048                 for (i = 0; i < pg->index; i++) {
1049                         rec = &pg->records[i];
1050                         if (rec->flags & FTRACE_FL_FAILED)
1051                                 continue;
1052                         rec->flags &= ~type;
1053                 }
1054                 pg = pg->next;
1055         }
1056         preempt_enable();
1057 }
1058
1059 static int
1060 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1061 {
1062         struct ftrace_iterator *iter;
1063         int ret = 0;
1064
1065         if (unlikely(ftrace_disabled))
1066                 return -ENODEV;
1067
1068         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1069         if (!iter)
1070                 return -ENOMEM;
1071
1072         mutex_lock(&ftrace_regex_lock);
1073         if ((file->f_mode & FMODE_WRITE) &&
1074             !(file->f_flags & O_APPEND))
1075                 ftrace_filter_reset(enable);
1076
1077         if (file->f_mode & FMODE_READ) {
1078                 iter->pg = ftrace_pages_start;
1079                 iter->pos = -1;
1080                 iter->flags = enable ? FTRACE_ITER_FILTER :
1081                         FTRACE_ITER_NOTRACE;
1082
1083                 ret = seq_open(file, &show_ftrace_seq_ops);
1084                 if (!ret) {
1085                         struct seq_file *m = file->private_data;
1086                         m->private = iter;
1087                 } else
1088                         kfree(iter);
1089         } else
1090                 file->private_data = iter;
1091         mutex_unlock(&ftrace_regex_lock);
1092
1093         return ret;
1094 }
1095
/* open handler for set_ftrace_filter (enable == 1 selects the filter set) */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
1101
/* open handler for set_ftrace_notrace (enable == 0 selects the notrace set) */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1107
1108 static ssize_t
1109 ftrace_regex_read(struct file *file, char __user *ubuf,
1110                        size_t cnt, loff_t *ppos)
1111 {
1112         if (file->f_mode & FMODE_READ)
1113                 return seq_read(file, ubuf, cnt, ppos);
1114         else
1115                 return -EPERM;
1116 }
1117
/*
 * llseek handler for the regex files.  Readable opens delegate to
 * seq_lseek.  Write-only opens have no seekable content, so f_pos is
 * pinned to 1: a non-zero *ppos keeps ftrace_regex_write() from
 * resetting its partial-pattern state.
 */
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
1130
/* glob match types derived from the position of '*' in the pattern */
enum {
	MATCH_FULL,		/* no wildcard: exact symbol compare */
	MATCH_FRONT_ONLY,	/* "abc*": compare leading bytes only */
	MATCH_MIDDLE_ONLY,	/* "*abc*": substring anywhere */
	MATCH_END_ONLY,		/* "*abc": substring must end the symbol */
};
1137
/*
 * Mark every record whose symbol matches @buff with the FILTER
 * (enable) or NOTRACE (!enable) flag.  A single '*' in the pattern
 * selects prefix, suffix or substring matching; without one an exact
 * compare is done.  Note: @buff may be modified — the '*' ending a
 * prefix pattern is overwritten with NUL.
 */
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	/* classify the pattern by where (if anywhere) a '*' occurs */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				/* leading '*': match against the tail */
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					/* "*x*": plain substring search */
					type = MATCH_MIDDLE_ONLY;
				} else {
					/* "x*": prefix match of length i */
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				/* suffix: the substring must end the symbol */
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}
1208
1209 static ssize_t
1210 ftrace_regex_write(struct file *file, const char __user *ubuf,
1211                    size_t cnt, loff_t *ppos, int enable)
1212 {
1213         struct ftrace_iterator *iter;
1214         char ch;
1215         size_t read = 0;
1216         ssize_t ret;
1217
1218         if (!cnt || cnt < 0)
1219                 return 0;
1220
1221         mutex_lock(&ftrace_regex_lock);
1222
1223         if (file->f_mode & FMODE_READ) {
1224                 struct seq_file *m = file->private_data;
1225                 iter = m->private;
1226         } else
1227                 iter = file->private_data;
1228
1229         if (!*ppos) {
1230                 iter->flags &= ~FTRACE_ITER_CONT;
1231                 iter->buffer_idx = 0;
1232         }
1233
1234         ret = get_user(ch, ubuf++);
1235         if (ret)
1236                 goto out;
1237         read++;
1238         cnt--;
1239
1240         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1241                 /* skip white space */
1242                 while (cnt && isspace(ch)) {
1243                         ret = get_user(ch, ubuf++);
1244                         if (ret)
1245                                 goto out;
1246                         read++;
1247                         cnt--;
1248                 }
1249
1250                 if (isspace(ch)) {
1251                         file->f_pos += read;
1252                         ret = read;
1253                         goto out;
1254                 }
1255
1256                 iter->buffer_idx = 0;
1257         }
1258
1259         while (cnt && !isspace(ch)) {
1260                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1261                         iter->buffer[iter->buffer_idx++] = ch;
1262                 else {
1263                         ret = -EINVAL;
1264                         goto out;
1265                 }
1266                 ret = get_user(ch, ubuf++);
1267                 if (ret)
1268                         goto out;
1269                 read++;
1270                 cnt--;
1271         }
1272
1273         if (isspace(ch)) {
1274                 iter->filtered++;
1275                 iter->buffer[iter->buffer_idx] = 0;
1276                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1277                 iter->buffer_idx = 0;
1278         } else
1279                 iter->flags |= FTRACE_ITER_CONT;
1280
1281
1282         file->f_pos += read;
1283
1284         ret = read;
1285  out:
1286         mutex_unlock(&ftrace_regex_lock);
1287
1288         return ret;
1289 }
1290
/* write handler for set_ftrace_filter: add patterns to the filter set */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}
1297
/* write handler for set_ftrace_notrace: add patterns to the notrace set */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
1304
/*
 * Common helper for ftrace_set_filter()/ftrace_set_notrace():
 * optionally reset the chosen flag on all records, then apply @buf
 * as a match pattern.  A no-op once ftrace has been disabled.
 */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
1318
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf: the string that holds the function filter text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
1332
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf: the string that holds the function notrace text.
 * @len: the length of the string.
 * @reset: non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1347
/*
 * release handler shared by set_ftrace_filter and set_ftrace_notrace.
 * Flushes any partially-written pattern still in the iterator buffer,
 * then pushes the updated call sites live if patterns were added while
 * tracing is active.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* apply a trailing pattern that was not terminated by whitespace */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
1379
/* release handler for set_ftrace_filter */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
1385
/* release handler for set_ftrace_notrace */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1391
1392 static ssize_t
1393 ftraced_read(struct file *filp, char __user *ubuf,
1394                      size_t cnt, loff_t *ppos)
1395 {
1396         /* don't worry about races */
1397         char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1398         int r = strlen(buf);
1399
1400         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1401 }
1402
1403 static ssize_t
1404 ftraced_write(struct file *filp, const char __user *ubuf,
1405                       size_t cnt, loff_t *ppos)
1406 {
1407         char buf[64];
1408         long val;
1409         int ret;
1410
1411         if (cnt >= sizeof(buf))
1412                 return -EINVAL;
1413
1414         if (copy_from_user(&buf, ubuf, cnt))
1415                 return -EFAULT;
1416
1417         if (strncmp(buf, "enable", 6) == 0)
1418                 val = 1;
1419         else if (strncmp(buf, "disable", 7) == 0)
1420                 val = 0;
1421         else {
1422                 buf[cnt] = 0;
1423
1424                 ret = strict_strtoul(buf, 10, &val);
1425                 if (ret < 0)
1426                         return ret;
1427
1428                 val = !!val;
1429         }
1430
1431         if (val)
1432                 ftrace_enable_daemon();
1433         else
1434                 ftrace_disable_daemon();
1435
1436         filp->f_pos += cnt;
1437
1438         return cnt;
1439 }
1440
/* read-only listing of every function available for tracing */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

/* read-only listing of records that failed to be converted */
static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

/* read/write interface for set_ftrace_filter */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

/* read/write interface for set_ftrace_notrace */
static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};

/* enable/disable control for the ftraced daemon */
static struct file_operations ftraced_fops = {
	.open = tracing_open_generic,
	.read = ftraced_read,
	.write = ftraced_write,
};
1476
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * Returns 0 on success, -ENODEV when ftrace is disabled and -EBUSY
 * when the recorded functions could not be updated.
 */
int ftrace_force_update(void)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);

	/*
	 * If ftraced_trigger is not set, then there is nothing
	 * to update.
	 */
	if (ftraced_trigger && !ftrace_update_code())
		ret = -EBUSY;

	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1502
/*
 * Stop the ftraced daemon and disable all mcount call sites.  Used on
 * the shutdown/kill path to take dynamic ftrace fully out of the way.
 */
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	ftraced_task = NULL;
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	/* the daemon is stopped only after ftraced_lock is dropped */
	if (task)
		kthread_stop(task);
}
1518
1519 static __init int ftrace_init_debugfs(void)
1520 {
1521         struct dentry *d_tracer;
1522         struct dentry *entry;
1523
1524         d_tracer = tracing_init_dentry();
1525
1526         entry = debugfs_create_file("available_filter_functions", 0444,
1527                                     d_tracer, NULL, &ftrace_avail_fops);
1528         if (!entry)
1529                 pr_warning("Could not create debugfs "
1530                            "'available_filter_functions' entry\n");
1531
1532         entry = debugfs_create_file("failures", 0444,
1533                                     d_tracer, NULL, &ftrace_failures_fops);
1534         if (!entry)
1535                 pr_warning("Could not create debugfs 'failures' entry\n");
1536
1537         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1538                                     NULL, &ftrace_filter_fops);
1539         if (!entry)
1540                 pr_warning("Could not create debugfs "
1541                            "'set_ftrace_filter' entry\n");
1542
1543         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1544                                     NULL, &ftrace_notrace_fops);
1545         if (!entry)
1546                 pr_warning("Could not create debugfs "
1547                            "'set_ftrace_notrace' entry\n");
1548
1549         entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1550                                     NULL, &ftraced_fops);
1551         if (!entry)
1552                 pr_warning("Could not create debugfs "
1553                            "'ftraced_enabled' entry\n");
1554         return 0;
1555 }
1556
1557 fs_initcall(ftrace_init_debugfs);
1558
1559 static int __init ftrace_dynamic_init(void)
1560 {
1561         struct task_struct *p;
1562         unsigned long addr;
1563         int ret;
1564
1565         addr = (unsigned long)ftrace_record_ip;
1566
1567         stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
1568
1569         /* ftrace_dyn_arch_init places the return code in addr */
1570         if (addr) {
1571                 ret = (int)addr;
1572                 goto failed;
1573         }
1574
1575         ret = ftrace_dyn_table_alloc();
1576         if (ret)
1577                 goto failed;
1578
1579         p = kthread_run(ftraced, NULL, "ftraced");
1580         if (IS_ERR(p)) {
1581                 ret = -1;
1582                 goto failed;
1583         }
1584
1585         last_ftrace_enabled = ftrace_enabled = 1;
1586         ftraced_task = p;
1587
1588         return 0;
1589
1590  failed:
1591         ftrace_disabled = 1;
1592         return ret;
1593 }
1594
1595 core_initcall(ftrace_dynamic_init);
1596 #else
1597 # define ftrace_startup()               do { } while (0)
1598 # define ftrace_shutdown()              do { } while (0)
1599 # define ftrace_startup_sysctl()        do { } while (0)
1600 # define ftrace_shutdown_sysctl()       do { } while (0)
1601 # define ftrace_force_shutdown()        do { } while (0)
1602 #endif /* CONFIG_DYNAMIC_FTRACE */
1603
/**
 * ftrace_kill_atomic - kill ftrace from critical sections
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill_atomic(void)
{
	/* no locks taken here: must remain callable from atomic context */
	ftrace_disabled = 1;
	ftrace_enabled = 0;
#ifdef CONFIG_DYNAMIC_FTRACE
	ftraced_suspend = -1;
#endif
	clear_ftrace_function();
}
1620
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If something was detected that seems
 * wrong, calling this function will keep ftrace from doing
 * any more modifications, and updates.
 * used when something went wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
1641
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 *
 * Returns the result of __register_ftrace_function(), or -1 when
 * ftrace has been disabled.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1667
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	/* tear the trace function down if this was the last ops */
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1685
/*
 * sysctl handler for the ftrace_enabled knob.  On a value change,
 * switches the trace function between the registered ops (or the
 * list walker when more than one is registered) and ftrace_stub.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* nothing to do on read, on error, or when the value is unchanged */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* a single entry can be called directly, no list walk */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}