ftrace: release functions from hash
kernel/trace/ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/hash.h>
29 #include <linux/list.h>
30
31 #include <asm/ftrace.h>
32
33 #include "trace.h"
34
35 /* ftrace_enabled is a method to turn ftrace on or off */
36 int ftrace_enabled __read_mostly;
37 static int last_ftrace_enabled;
38
39 /*
40  * ftrace_disabled is set when an anomaly is discovered.
41  * ftrace_disabled is much stronger than ftrace_enabled.
42  */
43 static int ftrace_disabled __read_mostly;
44
45 static DEFINE_SPINLOCK(ftrace_lock);
46 static DEFINE_MUTEX(ftrace_sysctl_lock);
47
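/*
 * The list of registered ftrace_ops is never empty: it is terminated
 * by this sentinel entry, so list walkers can compare against
 * &ftrace_list_end instead of checking for NULL.
 */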
48 static struct ftrace_ops ftrace_list_end __read_mostly =
49 {
50         .func = ftrace_stub,
51 };
52
53 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
54 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
55
56 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
57 {
58         struct ftrace_ops *op = ftrace_list;
59
60         /* in case someone actually ports this to alpha! */
61         read_barrier_depends();
62
63         while (op != &ftrace_list_end) {
64                 /* silly alpha */
65                 read_barrier_depends();
66                 op->func(ip, parent_ip);
67                 op = op->next;
68         }
69 }
70
71 /**
72  * clear_ftrace_function - reset the ftrace function
73  *
74  * This NULLs the ftrace function and in essence stops
75  * tracing.  There may be a lag before other CPUs see the change.
76  */
77 void clear_ftrace_function(void)
78 {
79         ftrace_trace_function = ftrace_stub;
80 }
81
82 static int __register_ftrace_function(struct ftrace_ops *ops)
83 {
84         /* should not be called from interrupt context */
85         spin_lock(&ftrace_lock);
86
87         ops->next = ftrace_list;
88         /*
89          * We are entering ops into the ftrace_list but another
90          * CPU might be walking that list. We need to make sure
91          * the ops->next pointer is valid before another CPU sees
92          * the ops pointer included into the ftrace_list.
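          * (The read side pairs with the read_barrier_depends() calls
          * in ftrace_list_func() above.)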
93          */
94         smp_wmb();
95         ftrace_list = ops;
96
97         if (ftrace_enabled) {
98                 /*
99                  * For one func, simply call it directly.
100                  * For more than one func, call the chain.
101                  */
102                 if (ops->next == &ftrace_list_end)
103                         ftrace_trace_function = ops->func;
104                 else
105                         ftrace_trace_function = ftrace_list_func;
106         }
107
108         spin_unlock(&ftrace_lock);
109
110         return 0;
111 }
112
113 static int __unregister_ftrace_function(struct ftrace_ops *ops)
114 {
115         struct ftrace_ops **p;
116         int ret = 0;
117
118         /* should not be called from interrupt context */
119         spin_lock(&ftrace_lock);
120
121         /*
122          * If we are removing the last function, then simply point
123          * to the ftrace_stub.
124          */
125         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
126                 ftrace_trace_function = ftrace_stub;
127                 ftrace_list = &ftrace_list_end;
128                 goto out;
129         }
130
131         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
132                 if (*p == ops)
133                         break;
134
135         if (*p != ops) {
136                 ret = -1;
137                 goto out;
138         }
139
140         *p = (*p)->next;
141
142         if (ftrace_enabled) {
143                 /* If we only have one func left, then call that directly */
144                 if (ftrace_list == &ftrace_list_end ||
145                     ftrace_list->next == &ftrace_list_end)
146                         ftrace_trace_function = ftrace_list->func;
147         }
148
149  out:
150         spin_unlock(&ftrace_lock);
151
152         return ret;
153 }
154
155 #ifdef CONFIG_DYNAMIC_FTRACE
156
157 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
158 /*
159  * The hash lock is only needed when the recording of the mcount
160  * callers is dynamic. That is, the call sites record themselves
161  * at run time rather than being recorded at compile time.
162  */
163 static DEFINE_SPINLOCK(ftrace_hash_lock);
164 #define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
165 #define ftrace_hash_unlock(flags) \
166                         spin_unlock_irqrestore(&ftrace_hash_lock, flags)
167 static void ftrace_release_hash(unsigned long start, unsigned long end);
168 #else
169 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
170 #define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
171 #define ftrace_hash_unlock(flags) do { } while (0)
172 static inline void ftrace_release_hash(unsigned long start, unsigned long end)
173 {
174 }
175 #endif
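/*
 * Two configurations share the code below. With
 * CONFIG_FTRACE_MCOUNT_RECORD the mcount call sites are collected at
 * build time into the __mcount_loc section and fed to
 * ftrace_convert_nops() under ftrace_lock. Without it, call sites
 * announce themselves at run time through ftrace_record_ip(), which
 * is why the hash needs its own lock in that case.
 */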
176
177 /*
178  * Since MCOUNT_ADDR may point to mcount itself, we do not want
179  * to get it confused with a reference read from the code while
180  * parsing the objcopy output of the text section. Use a variable for
181  * it instead.
182  */
183 static unsigned long mcount_addr = MCOUNT_ADDR;
184
185 static struct task_struct *ftraced_task;
186
187 enum {
188         FTRACE_ENABLE_CALLS             = (1 << 0),
189         FTRACE_DISABLE_CALLS            = (1 << 1),
190         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
191         FTRACE_ENABLE_MCOUNT            = (1 << 3),
192         FTRACE_DISABLE_MCOUNT           = (1 << 4),
193 };
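/*
 * These flags are OR'd together into a command word that is handed to
 * __ftrace_modify_code(), which interprets it with the machine
 * stopped (see ftrace_run_update_code()).
 */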
194
195 static int ftrace_filtered;
196 static int tracing_on;
197 static int frozen_record_count;
198
199 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
200
201 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
202
203 static DEFINE_MUTEX(ftraced_lock);
204 static DEFINE_MUTEX(ftrace_regex_lock);
205
206 struct ftrace_page {
207         struct ftrace_page      *next;
208         unsigned long           index;
209         struct dyn_ftrace       records[];
210 };
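/*
 * dyn_ftrace records are carved out of whole pages: each ftrace_page
 * header sits at the start of a page and the records[] flexible array
 * fills the remainder (ENTRIES_PER_PAGE below). Pages are chained
 * through ->next and are not freed; dead records are recycled via the
 * ftrace_free_records list instead.
 */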
211
212 #define ENTRIES_PER_PAGE \
213   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
214
215 /* estimate from running different kernels */
216 #define NR_TO_INIT              10000
217
218 static struct ftrace_page       *ftrace_pages_start;
219 static struct ftrace_page       *ftrace_pages;
220
221 static int ftraced_trigger;
222 static int ftraced_suspend;
223 static int ftraced_stop;
224
225 static int ftrace_record_suspend;
226
227 static struct dyn_ftrace *ftrace_free_records;
228
229
230 #ifdef CONFIG_KPROBES
231 static inline void freeze_record(struct dyn_ftrace *rec)
232 {
233         if (!(rec->flags & FTRACE_FL_FROZEN)) {
234                 rec->flags |= FTRACE_FL_FROZEN;
235                 frozen_record_count++;
236         }
237 }
238
239 static inline void unfreeze_record(struct dyn_ftrace *rec)
240 {
241         if (rec->flags & FTRACE_FL_FROZEN) {
242                 rec->flags &= ~FTRACE_FL_FROZEN;
243                 frozen_record_count--;
244         }
245 }
246
247 static inline int record_frozen(struct dyn_ftrace *rec)
248 {
249         return rec->flags & FTRACE_FL_FROZEN;
250 }
251 #else
252 # define freeze_record(rec)                     ({ 0; })
253 # define unfreeze_record(rec)                   ({ 0; })
254 # define record_frozen(rec)                     ({ 0; })
255 #endif /* CONFIG_KPROBES */
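/*
 * Freezing matters because a kprobe typically replaces the
 * instruction at the probed address with a breakpoint: while a record
 * is frozen, the patching code leaves its call site alone (see
 * ftrace_replace_code() and __ftrace_update_code()).
 */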
256
257 int skip_trace(unsigned long ip)
258 {
259         unsigned long fl;
260         struct dyn_ftrace *rec;
261         struct hlist_node *t;
262         struct hlist_head *head;
263
264         if (frozen_record_count == 0)
265                 return 0;
266
267         head = &ftrace_hash[hash_long(ip, FTRACE_HASHBITS)];
268         hlist_for_each_entry_rcu(rec, t, head, node) {
269                 if (rec->ip == ip) {
270                         if (record_frozen(rec)) {
271                                 if (rec->flags & FTRACE_FL_FAILED)
272                                         return 1;
273
274                                 if (!(rec->flags & FTRACE_FL_CONVERTED))
275                                         return 1;
276
277                                 if (!tracing_on || !ftrace_enabled)
278                                         return 1;
279
280                                 if (ftrace_filtered) {
281                                         fl = rec->flags & (FTRACE_FL_FILTER |
282                                                            FTRACE_FL_NOTRACE);
283                                         if (!fl || (fl & FTRACE_FL_NOTRACE))
284                                                 return 1;
285                                 }
286                         }
287                         break;
288                 }
289         }
290
291         return 0;
292 }
293
294 static inline int
295 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
296 {
297         struct dyn_ftrace *p;
298         struct hlist_node *t;
299         int found = 0;
300
301         hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
302                 if (p->ip == ip) {
303                         found = 1;
304                         break;
305                 }
306         }
307
308         return found;
309 }
310
311 static inline void
312 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
313 {
314         hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
315 }
316
317 /* called from stop_machine */
318 static inline void ftrace_del_hash(struct dyn_ftrace *node)
319 {
320         hlist_del(&node->node);
321 }
322
323 static void ftrace_free_rec(struct dyn_ftrace *rec)
324 {
325         rec->ip = (unsigned long)ftrace_free_records;
326         ftrace_free_records = rec;
327         rec->flags |= FTRACE_FL_FREE;
328 }
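/*
 * Freed records form a singly linked free list threaded through their
 * ->ip field (the record is dead, so the field can be reused), with
 * FTRACE_FL_FREE marking membership so the list can be sanity
 * checked. ftrace_alloc_dyn_node() pops from this list before carving
 * new records out of ftrace_pages.
 */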
329
330 void ftrace_release(void *start, unsigned long size)
331 {
332         struct dyn_ftrace *rec;
333         struct ftrace_page *pg;
334         unsigned long s = (unsigned long)start;
335         unsigned long e = s + size;
336         int i;
337
338         if (ftrace_disabled || !start)
339                 return;
340
341         /* should not be called from interrupt context */
342         spin_lock(&ftrace_lock);
343
344         for (pg = ftrace_pages_start; pg; pg = pg->next) {
345                 for (i = 0; i < pg->index; i++) {
346                         rec = &pg->records[i];
347
348                         if ((rec->ip >= s) && (rec->ip < e))
349                                 ftrace_free_rec(rec);
350                 }
351         }
352         spin_unlock(&ftrace_lock);
353
354         ftrace_release_hash(s, e);
355 }
356
357 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
358 {
359         struct dyn_ftrace *rec;
360
361         /* First check for freed records */
362         if (ftrace_free_records) {
363                 rec = ftrace_free_records;
364
365                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
366                         WARN_ON_ONCE(1);
367                         ftrace_free_records = NULL;
368                         ftrace_disabled = 1;
369                         ftrace_enabled = 0;
370                         return NULL;
371                 }
372
373                 ftrace_free_records = (void *)rec->ip;
374                 memset(rec, 0, sizeof(*rec));
375                 return rec;
376         }
377
378         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
379                 if (!ftrace_pages->next)
380                         return NULL;
381                 ftrace_pages = ftrace_pages->next;
382         }
383
384         return &ftrace_pages->records[ftrace_pages->index++];
385 }
386
387 static void
388 ftrace_record_ip(unsigned long ip)
389 {
390         struct dyn_ftrace *node;
391         unsigned long flags;
392         unsigned long key;
393         int resched;
394         int cpu;
395
396         if (!ftrace_enabled || ftrace_disabled)
397                 return;
398
399         resched = need_resched();
400         preempt_disable_notrace();
401
402         /*
403          * We simply need to protect against recursion.
404          * Use the raw version of smp_processor_id and not
405          * __get_cpu_var which can call debug hooks that can
406          * cause a recursive crash here.
407          */
408         cpu = raw_smp_processor_id();
409         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
410         if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
411                 goto out;
412
413         if (unlikely(ftrace_record_suspend))
414                 goto out;
415
416         key = hash_long(ip, FTRACE_HASHBITS);
417
418         WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
419
420         if (ftrace_ip_in_hash(ip, key))
421                 goto out;
422
423         ftrace_hash_lock(flags);
424
425         /* the ip may have been added to the hash before we took the lock */
426         if (ftrace_ip_in_hash(ip, key))
427                 goto out_unlock;
428
429         node = ftrace_alloc_dyn_node(ip);
430         if (!node)
431                 goto out_unlock;
432
433         node->ip = ip;
434
435         ftrace_add_hash(node, key);
436
437         ftraced_trigger = 1;
438
439  out_unlock:
440         ftrace_hash_unlock(flags);
441  out:
442         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
443
444         /* prevent recursion with scheduler */
445         if (resched)
446                 preempt_enable_no_resched_notrace();
447         else
448                 preempt_enable_notrace();
449 }
450
451 #define FTRACE_ADDR ((long)(ftrace_caller))
452
453 static int
454 __ftrace_replace_code(struct dyn_ftrace *rec,
455                       unsigned char *old, unsigned char *new, int enable)
456 {
457         unsigned long ip, fl;
458
459         ip = rec->ip;
460
461         if (ftrace_filtered && enable) {
462                 /*
463                  * If filtering is on:
464                  *
465                  * If this record is set to be filtered and
466                  * is enabled then do nothing.
467                  *
468                  * If this record is set to be filtered and
469                  * it is not enabled, enable it.
470                  *
471                  * If this record is not set to be filtered
472                  * and it is not enabled do nothing.
473                  *
474                  * If this record is set not to trace then
475                  * do nothing.
476                  *
477                  * If this record is set not to trace and
478                  * it is enabled then disable it.
479                  *
480                  * If this record is not set to be filtered and
481                  * it is enabled, disable it.
482                  */
483
484                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
485                                    FTRACE_FL_ENABLED);
486
487                 if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
488                     (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
489                     !fl || (fl == FTRACE_FL_NOTRACE))
490                         return 0;
491
492                 /*
493                  * If it is enabled disable it,
494                  * otherwise enable it!
495                  */
496                 if (fl & FTRACE_FL_ENABLED) {
497                         /* swap new and old */
498                         new = old;
499                         old = ftrace_call_replace(ip, FTRACE_ADDR);
500                         rec->flags &= ~FTRACE_FL_ENABLED;
501                 } else {
502                         new = ftrace_call_replace(ip, FTRACE_ADDR);
503                         rec->flags |= FTRACE_FL_ENABLED;
504                 }
505         } else {
506
507                 if (enable) {
508                         /*
509                          * If this record is set not to trace and is
510                          * not enabled, do nothing.
511                          */
512                         fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
513                         if (fl == FTRACE_FL_NOTRACE)
514                                 return 0;
515
516                         new = ftrace_call_replace(ip, FTRACE_ADDR);
517                 } else
518                         old = ftrace_call_replace(ip, FTRACE_ADDR);
519
520                 if (enable) {
521                         if (rec->flags & FTRACE_FL_ENABLED)
522                                 return 0;
523                         rec->flags |= FTRACE_FL_ENABLED;
524                 } else {
525                         if (!(rec->flags & FTRACE_FL_ENABLED))
526                                 return 0;
527                         rec->flags &= ~FTRACE_FL_ENABLED;
528                 }
529         }
530
531         return ftrace_modify_code(ip, old, new);
532 }
533
534 static void ftrace_replace_code(int enable)
535 {
536         int i, failed;
537         unsigned char *new = NULL, *old = NULL;
538         struct dyn_ftrace *rec;
539         struct ftrace_page *pg;
540
541         if (enable)
542                 old = ftrace_nop_replace();
543         else
544                 new = ftrace_nop_replace();
545
546         for (pg = ftrace_pages_start; pg; pg = pg->next) {
547                 for (i = 0; i < pg->index; i++) {
548                         rec = &pg->records[i];
549
550                         /* don't modify code that has already faulted */
551                         if (rec->flags & FTRACE_FL_FAILED)
552                                 continue;
553
554                         /* ignore updates to this record's mcount site */
555                         if (get_kprobe((void *)rec->ip)) {
556                                 freeze_record(rec);
557                                 continue;
558                         } else {
559                                 unfreeze_record(rec);
560                         }
561
562                         failed = __ftrace_replace_code(rec, old, new, enable);
563                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
564                                 rec->flags |= FTRACE_FL_FAILED;
565                                 if ((system_state == SYSTEM_BOOTING) ||
566                                     !core_kernel_text(rec->ip)) {
567                                         ftrace_del_hash(rec);
568                                         ftrace_free_rec(rec);
569                                 }
570                         }
571                 }
572         }
573 }
574
575 static void ftrace_shutdown_replenish(void)
576 {
577         if (ftrace_pages->next)
578                 return;
579
580         /* allocate another page */
581         ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
582 }
583
584 static void print_ip_ins(const char *fmt, unsigned char *p)
585 {
586         int i;
587
588         printk(KERN_CONT "%s", fmt);
589
590         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
591                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
592 }
593
594 static int
595 ftrace_code_disable(struct dyn_ftrace *rec)
596 {
597         unsigned long ip;
598         unsigned char *nop, *call;
599         int failed;
600
601         ip = rec->ip;
602
603         nop = ftrace_nop_replace();
604         call = ftrace_call_replace(ip, mcount_addr);
605
606         failed = ftrace_modify_code(ip, call, nop);
607         if (failed) {
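                /*
                 * Failure codes from ftrace_modify_code() as consumed
                 * here: 1 means the call site faulted when accessed,
                 * 2 means the bytes found there did not match the
                 * expected instruction.
                 */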
608                 switch (failed) {
609                 case 1:
610                         WARN_ON_ONCE(1);
611                         pr_info("ftrace faulted on modifying ");
612                         print_ip_sym(ip);
613                         break;
614                 case 2:
615                         WARN_ON_ONCE(1);
616                         pr_info("ftrace failed to modify ");
617                         print_ip_sym(ip);
618                         print_ip_ins(" expected: ", call);
619                         print_ip_ins(" actual: ", (unsigned char *)ip);
620                         print_ip_ins(" replace: ", nop);
621                         printk(KERN_CONT "\n");
622                         break;
623                 }
624
625                 rec->flags |= FTRACE_FL_FAILED;
626                 return 0;
627         }
628         return 1;
629 }
630
631 static int __ftrace_update_code(void *ignore);
632
633 static int __ftrace_modify_code(void *data)
634 {
635         unsigned long addr;
636         int *command = data;
637
638         if (*command & FTRACE_ENABLE_CALLS) {
639                 /*
640                  * Update any recorded ips now that we have the
641                  * machine stopped
642                  */
643                 __ftrace_update_code(NULL);
644                 ftrace_replace_code(1);
645                 tracing_on = 1;
646         } else if (*command & FTRACE_DISABLE_CALLS) {
647                 ftrace_replace_code(0);
648                 tracing_on = 0;
649         }
650
651         if (*command & FTRACE_UPDATE_TRACE_FUNC)
652                 ftrace_update_ftrace_func(ftrace_trace_function);
653
654         if (*command & FTRACE_ENABLE_MCOUNT) {
655                 addr = (unsigned long)ftrace_record_ip;
656                 ftrace_mcount_set(&addr);
657         } else if (*command & FTRACE_DISABLE_MCOUNT) {
658                 addr = (unsigned long)ftrace_stub;
659                 ftrace_mcount_set(&addr);
660         }
661
662         return 0;
663 }
664
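/*
 * All code patching funnels through here: stop_machine() parks every
 * CPU in a known context first, so no CPU can be executing an
 * instruction while __ftrace_modify_code() rewrites it.
 */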
665 static void ftrace_run_update_code(int command)
666 {
667         stop_machine(__ftrace_modify_code, &command, NULL);
668 }
669
670 void ftrace_disable_daemon(void)
671 {
672         /* Stop the daemon from calling stop_machine */
673         mutex_lock(&ftraced_lock);
674         ftraced_stop = 1;
675         mutex_unlock(&ftraced_lock);
676
677         ftrace_force_update();
678 }
679
680 void ftrace_enable_daemon(void)
681 {
682         mutex_lock(&ftraced_lock);
683         ftraced_stop = 0;
684         mutex_unlock(&ftraced_lock);
685
686         ftrace_force_update();
687 }
688
689 static ftrace_func_t saved_ftrace_func;
690
691 static void ftrace_startup(void)
692 {
693         int command = 0;
694
695         if (unlikely(ftrace_disabled))
696                 return;
697
698         mutex_lock(&ftraced_lock);
699         ftraced_suspend++;
700         if (ftraced_suspend == 1)
701                 command |= FTRACE_ENABLE_CALLS;
702
703         if (saved_ftrace_func != ftrace_trace_function) {
704                 saved_ftrace_func = ftrace_trace_function;
705                 command |= FTRACE_UPDATE_TRACE_FUNC;
706         }
707
708         if (!command || !ftrace_enabled)
709                 goto out;
710
711         ftrace_run_update_code(command);
712  out:
713         mutex_unlock(&ftraced_lock);
714 }
715
716 static void ftrace_shutdown(void)
717 {
718         int command = 0;
719
720         if (unlikely(ftrace_disabled))
721                 return;
722
723         mutex_lock(&ftraced_lock);
724         ftraced_suspend--;
725         if (!ftraced_suspend)
726                 command |= FTRACE_DISABLE_CALLS;
727
728         if (saved_ftrace_func != ftrace_trace_function) {
729                 saved_ftrace_func = ftrace_trace_function;
730                 command |= FTRACE_UPDATE_TRACE_FUNC;
731         }
732
733         if (!command || !ftrace_enabled)
734                 goto out;
735
736         ftrace_run_update_code(command);
737  out:
738         mutex_unlock(&ftraced_lock);
739 }
740
741 static void ftrace_startup_sysctl(void)
742 {
743         int command = FTRACE_ENABLE_MCOUNT;
744
745         if (unlikely(ftrace_disabled))
746                 return;
747
748         mutex_lock(&ftraced_lock);
749         /* Force update next time */
750         saved_ftrace_func = NULL;
751         /* ftraced_suspend is true if we want ftrace running */
752         if (ftraced_suspend)
753                 command |= FTRACE_ENABLE_CALLS;
754
755         ftrace_run_update_code(command);
756         mutex_unlock(&ftraced_lock);
757 }
758
759 static void ftrace_shutdown_sysctl(void)
760 {
761         int command = FTRACE_DISABLE_MCOUNT;
762
763         if (unlikely(ftrace_disabled))
764                 return;
765
766         mutex_lock(&ftraced_lock);
767         /* ftraced_suspend is true if ftrace is running */
768         if (ftraced_suspend)
769                 command |= FTRACE_DISABLE_CALLS;
770
771         ftrace_run_update_code(command);
772         mutex_unlock(&ftraced_lock);
773 }
774
775 static cycle_t          ftrace_update_time;
776 static unsigned long    ftrace_update_cnt;
777 unsigned long           ftrace_update_tot_cnt;
778
779 static int __ftrace_update_code(void *ignore)
780 {
781         int i, save_ftrace_enabled;
782         cycle_t start, stop;
783         struct dyn_ftrace *p;
784         struct hlist_node *t, *n;
785         struct hlist_head *head, temp_list;
786
787         /* Don't be recording funcs now */
788         ftrace_record_suspend++;
789         save_ftrace_enabled = ftrace_enabled;
790         ftrace_enabled = 0;
791
792         start = ftrace_now(raw_smp_processor_id());
793         ftrace_update_cnt = 0;
794
795         /* No locks needed, the machine is stopped! */
796         for (i = 0; i < FTRACE_HASHSIZE; i++) {
797                 INIT_HLIST_HEAD(&temp_list);
798                 head = &ftrace_hash[i];
799
800                 /* all CPUs are stopped, we are safe to modify code */
801                 hlist_for_each_entry_safe(p, t, n, head, node) {
802                         /* Skip over failed records which have not been
803                          * freed. */
804                         if (p->flags & FTRACE_FL_FAILED)
805                                 continue;
806
807                         /* Unconverted records are always at the head of the
808                          * hash bucket. Once we encounter a converted record,
809                          * simply skip over to the next bucket. Saves ftraced
810                          * some processor cycles (ftrace does its bit for
811                          * global warming :-p ). */
812                         if (p->flags & (FTRACE_FL_CONVERTED))
813                                 break;
814
815                         /* Ignore updates to this record's mcount site.
816                          * Reintroduce this record at the head of this
817                          * bucket to attempt to "convert" it again if
818                          * the kprobe on it is unregistered before the
819                          * next run. */
820                         if (get_kprobe((void *)p->ip)) {
821                                 ftrace_del_hash(p);
822                                 INIT_HLIST_NODE(&p->node);
823                                 hlist_add_head(&p->node, &temp_list);
824                                 freeze_record(p);
825                                 continue;
826                         } else {
827                                 unfreeze_record(p);
828                         }
829
830                         /* convert record (i.e., patch the mcount call with a NOP) */
831                         if (ftrace_code_disable(p)) {
832                                 p->flags |= FTRACE_FL_CONVERTED;
833                                 ftrace_update_cnt++;
834                         } else {
835                                 if ((system_state == SYSTEM_BOOTING) ||
836                                     !core_kernel_text(p->ip)) {
837                                         ftrace_del_hash(p);
838                                         ftrace_free_rec(p);
839                                 }
840                         }
841                 }
842
843                 hlist_for_each_entry_safe(p, t, n, &temp_list, node) {
844                         hlist_del(&p->node);
845                         INIT_HLIST_NODE(&p->node);
846                         hlist_add_head(&p->node, head);
847                 }
848         }
849
850         stop = ftrace_now(raw_smp_processor_id());
851         ftrace_update_time = stop - start;
852         ftrace_update_tot_cnt += ftrace_update_cnt;
853         ftraced_trigger = 0;
854
855         ftrace_enabled = save_ftrace_enabled;
856         ftrace_record_suspend--;
857
858         return 0;
859 }
860
861 static int ftrace_update_code(void)
862 {
863         if (unlikely(ftrace_disabled) ||
864             !ftrace_enabled || !ftraced_trigger)
865                 return 0;
866
867         stop_machine(__ftrace_update_code, NULL, NULL);
868
869         return 1;
870 }
871
872 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
873 {
874         struct ftrace_page *pg;
875         int cnt;
876         int i;
877
878         /* allocate a few pages */
879         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
880         if (!ftrace_pages_start)
881                 return -1;
882
883         /*
884          * Allocate a few more pages.
885          *
886          * TODO: have some parser search vmlinux before
887          *   final linking to find all calls to ftrace.
888          *   Then we can:
889          *    a) know how many pages to allocate.
890          *     and/or
891          *    b) set up the table then.
892          *
893          *  The dynamic code is still necessary for
894          *  modules.
895          */
896
897         pg = ftrace_pages = ftrace_pages_start;
898
899         cnt = num_to_init / ENTRIES_PER_PAGE;
900         pr_info("ftrace: allocating %ld hash entries in %d pages\n",
901                 num_to_init, cnt);
902
903         for (i = 0; i < cnt; i++) {
904                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
905
906                 /* If we fail, we'll try later anyway */
907                 if (!pg->next)
908                         break;
909
910                 pg = pg->next;
911         }
912
913         return 0;
914 }
915
916 enum {
917         FTRACE_ITER_FILTER      = (1 << 0),
918         FTRACE_ITER_CONT        = (1 << 1),
919         FTRACE_ITER_NOTRACE     = (1 << 2),
920         FTRACE_ITER_FAILURES    = (1 << 3),
921 };
922
923 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
924
925 struct ftrace_iterator {
926         loff_t                  pos;
927         struct ftrace_page      *pg;
928         unsigned                idx;
929         unsigned                flags;
930         unsigned char           buffer[FTRACE_BUFF_MAX+1];
931         unsigned                buffer_idx;
932         unsigned                filtered;
933 };
934
935 static void *
936 t_next(struct seq_file *m, void *v, loff_t *pos)
937 {
938         struct ftrace_iterator *iter = m->private;
939         struct dyn_ftrace *rec = NULL;
940
941         (*pos)++;
942
943         /* should not be called from interrupt context */
944         spin_lock(&ftrace_lock);
945  retry:
946         if (iter->idx >= iter->pg->index) {
947                 if (iter->pg->next) {
948                         iter->pg = iter->pg->next;
949                         iter->idx = 0;
950                         goto retry;
951                 }
952         } else {
953                 rec = &iter->pg->records[iter->idx++];
954                 if ((rec->flags & FTRACE_FL_FREE) ||
955
956                     (!(iter->flags & FTRACE_ITER_FAILURES) &&
957                      (rec->flags & FTRACE_FL_FAILED)) ||
958
959                     ((iter->flags & FTRACE_ITER_FAILURES) &&
960                      !(rec->flags & FTRACE_FL_FAILED)) ||
961
962                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
963                      !(rec->flags & FTRACE_FL_NOTRACE))) {
964                         rec = NULL;
965                         goto retry;
966                 }
967         }
968         spin_unlock(&ftrace_lock);
969
970         iter->pos = *pos;
971
972         return rec;
973 }
974
975 static void *t_start(struct seq_file *m, loff_t *pos)
976 {
977         struct ftrace_iterator *iter = m->private;
978         void *p = NULL;
979         loff_t l = -1;
980
981         if (*pos != iter->pos) {
982                 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
983                         ;
984         } else {
985                 l = *pos;
986                 p = t_next(m, p, &l);
987         }
988
989         return p;
990 }
991
992 static void t_stop(struct seq_file *m, void *p)
993 {
994 }
995
996 static int t_show(struct seq_file *m, void *v)
997 {
998         struct dyn_ftrace *rec = v;
999         char str[KSYM_SYMBOL_LEN];
1000
1001         if (!rec)
1002                 return 0;
1003
1004         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1005
1006         seq_printf(m, "%s\n", str);
1007
1008         return 0;
1009 }
1010
1011 static struct seq_operations show_ftrace_seq_ops = {
1012         .start = t_start,
1013         .next = t_next,
1014         .stop = t_stop,
1015         .show = t_show,
1016 };
1017
1018 static int
1019 ftrace_avail_open(struct inode *inode, struct file *file)
1020 {
1021         struct ftrace_iterator *iter;
1022         int ret;
1023
1024         if (unlikely(ftrace_disabled))
1025                 return -ENODEV;
1026
1027         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1028         if (!iter)
1029                 return -ENOMEM;
1030
1031         iter->pg = ftrace_pages_start;
1032         iter->pos = -1;
1033
1034         ret = seq_open(file, &show_ftrace_seq_ops);
1035         if (!ret) {
1036                 struct seq_file *m = file->private_data;
1037
1038                 m->private = iter;
1039         } else {
1040                 kfree(iter);
1041         }
1042
1043         return ret;
1044 }
1045
1046 int ftrace_avail_release(struct inode *inode, struct file *file)
1047 {
1048         struct seq_file *m = (struct seq_file *)file->private_data;
1049         struct ftrace_iterator *iter = m->private;
1050
1051         seq_release(inode, file);
1052         kfree(iter);
1053
1054         return 0;
1055 }
1056
1057 static int
1058 ftrace_failures_open(struct inode *inode, struct file *file)
1059 {
1060         int ret;
1061         struct seq_file *m;
1062         struct ftrace_iterator *iter;
1063
1064         ret = ftrace_avail_open(inode, file);
1065         if (!ret) {
1066                 m = (struct seq_file *)file->private_data;
1067                 iter = (struct ftrace_iterator *)m->private;
1068                 iter->flags = FTRACE_ITER_FAILURES;
1069         }
1070
1071         return ret;
1072 }
1073
1074
1075 static void ftrace_filter_reset(int enable)
1076 {
1077         struct ftrace_page *pg;
1078         struct dyn_ftrace *rec;
1079         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1080         unsigned i;
1081
1082         /* should not be called from interrupt context */
1083         spin_lock(&ftrace_lock);
1084         if (enable)
1085                 ftrace_filtered = 0;
1086         pg = ftrace_pages_start;
1087         while (pg) {
1088                 for (i = 0; i < pg->index; i++) {
1089                         rec = &pg->records[i];
1090                         if (rec->flags & FTRACE_FL_FAILED)
1091                                 continue;
1092                         rec->flags &= ~type;
1093                 }
1094                 pg = pg->next;
1095         }
1096         spin_unlock(&ftrace_lock);
1097 }
1098
1099 static int
1100 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1101 {
1102         struct ftrace_iterator *iter;
1103         int ret = 0;
1104
1105         if (unlikely(ftrace_disabled))
1106                 return -ENODEV;
1107
1108         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1109         if (!iter)
1110                 return -ENOMEM;
1111
1112         mutex_lock(&ftrace_regex_lock);
1113         if ((file->f_mode & FMODE_WRITE) &&
1114             !(file->f_flags & O_APPEND))
1115                 ftrace_filter_reset(enable);
1116
1117         if (file->f_mode & FMODE_READ) {
1118                 iter->pg = ftrace_pages_start;
1119                 iter->pos = -1;
1120                 iter->flags = enable ? FTRACE_ITER_FILTER :
1121                         FTRACE_ITER_NOTRACE;
1122
1123                 ret = seq_open(file, &show_ftrace_seq_ops);
1124                 if (!ret) {
1125                         struct seq_file *m = file->private_data;
1126                         m->private = iter;
1127                 } else
1128                         kfree(iter);
1129         } else
1130                 file->private_data = iter;
1131         mutex_unlock(&ftrace_regex_lock);
1132
1133         return ret;
1134 }
1135
1136 static int
1137 ftrace_filter_open(struct inode *inode, struct file *file)
1138 {
1139         return ftrace_regex_open(inode, file, 1);
1140 }
1141
1142 static int
1143 ftrace_notrace_open(struct inode *inode, struct file *file)
1144 {
1145         return ftrace_regex_open(inode, file, 0);
1146 }
1147
1148 static ssize_t
1149 ftrace_regex_read(struct file *file, char __user *ubuf,
1150                        size_t cnt, loff_t *ppos)
1151 {
1152         if (file->f_mode & FMODE_READ)
1153                 return seq_read(file, ubuf, cnt, ppos);
1154         else
1155                 return -EPERM;
1156 }
1157
1158 static loff_t
1159 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1160 {
1161         loff_t ret;
1162
1163         if (file->f_mode & FMODE_READ)
1164                 ret = seq_lseek(file, offset, origin);
1165         else
1166                 file->f_pos = ret = 1;
1167
1168         return ret;
1169 }
1170
1171 enum {
1172         MATCH_FULL,
1173         MATCH_FRONT_ONLY,
1174         MATCH_MIDDLE_ONLY,
1175         MATCH_END_ONLY,
1176 };
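/*
 * ftrace_match() below classifies a pattern by where its '*' sits:
 *
 *	"schedule"	MATCH_FULL		exact match
 *	"sched*"	MATCH_FRONT_ONLY	prefix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *	"*sched"	MATCH_END_ONLY		suffix match
 */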
1177
1178 static void
1179 ftrace_match(unsigned char *buff, int len, int enable)
1180 {
1181         char str[KSYM_SYMBOL_LEN];
1182         char *search = NULL;
1183         struct ftrace_page *pg;
1184         struct dyn_ftrace *rec;
1185         int type = MATCH_FULL;
1186         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1187         unsigned i, match = 0, search_len = 0;
1188
1189         for (i = 0; i < len; i++) {
1190                 if (buff[i] == '*') {
1191                         if (!i) {
1192                                 search = buff + i + 1;
1193                                 type = MATCH_END_ONLY;
1194                                 search_len = len - (i + 1);
1195                         } else {
1196                                 if (type == MATCH_END_ONLY) {
1197                                         type = MATCH_MIDDLE_ONLY;
1198                                 } else {
1199                                         match = i;
1200                                         type = MATCH_FRONT_ONLY;
1201                                 }
1202                                 buff[i] = 0;
1203                                 break;
1204                         }
1205                 }
1206         }
1207
1208         /* should not be called from interrupt context */
1209         spin_lock(&ftrace_lock);
1210         if (enable)
1211                 ftrace_filtered = 1;
1212         pg = ftrace_pages_start;
1213         while (pg) {
1214                 for (i = 0; i < pg->index; i++) {
1215                         int matched = 0;
1216                         char *ptr;
1217
1218                         rec = &pg->records[i];
1219                         if (rec->flags & FTRACE_FL_FAILED)
1220                                 continue;
1221                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1222                         switch (type) {
1223                         case MATCH_FULL:
1224                                 if (strcmp(str, buff) == 0)
1225                                         matched = 1;
1226                                 break;
1227                         case MATCH_FRONT_ONLY:
1228                                 if (memcmp(str, buff, match) == 0)
1229                                         matched = 1;
1230                                 break;
1231                         case MATCH_MIDDLE_ONLY:
1232                                 if (strstr(str, search))
1233                                         matched = 1;
1234                                 break;
1235                         case MATCH_END_ONLY:
1236                                 ptr = strstr(str, search);
1237                                 if (ptr && (ptr[search_len] == 0))
1238                                         matched = 1;
1239                                 break;
1240                         }
1241                         if (matched)
1242                                 rec->flags |= flag;
1243                 }
1244                 pg = pg->next;
1245         }
1246         spin_unlock(&ftrace_lock);
1247 }
1248
1249 static ssize_t
1250 ftrace_regex_write(struct file *file, const char __user *ubuf,
1251                    size_t cnt, loff_t *ppos, int enable)
1252 {
1253         struct ftrace_iterator *iter;
1254         char ch;
1255         size_t read = 0;
1256         ssize_t ret;
1257
1258         if (!cnt)
1259                 return 0;
1260
1261         mutex_lock(&ftrace_regex_lock);
1262
1263         if (file->f_mode & FMODE_READ) {
1264                 struct seq_file *m = file->private_data;
1265                 iter = m->private;
1266         } else
1267                 iter = file->private_data;
1268
1269         if (!*ppos) {
1270                 iter->flags &= ~FTRACE_ITER_CONT;
1271                 iter->buffer_idx = 0;
1272         }
1273
1274         ret = get_user(ch, ubuf++);
1275         if (ret)
1276                 goto out;
1277         read++;
1278         cnt--;
1279
1280         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1281                 /* skip white space */
1282                 while (cnt && isspace(ch)) {
1283                         ret = get_user(ch, ubuf++);
1284                         if (ret)
1285                                 goto out;
1286                         read++;
1287                         cnt--;
1288                 }
1289
1290                 if (isspace(ch)) {
1291                         file->f_pos += read;
1292                         ret = read;
1293                         goto out;
1294                 }
1295
1296                 iter->buffer_idx = 0;
1297         }
1298
1299         while (cnt && !isspace(ch)) {
1300                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1301                         iter->buffer[iter->buffer_idx++] = ch;
1302                 else {
1303                         ret = -EINVAL;
1304                         goto out;
1305                 }
1306                 ret = get_user(ch, ubuf++);
1307                 if (ret)
1308                         goto out;
1309                 read++;
1310                 cnt--;
1311         }
1312
1313         if (isspace(ch)) {
1314                 iter->filtered++;
1315                 iter->buffer[iter->buffer_idx] = 0;
1316                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1317                 iter->buffer_idx = 0;
1318         } else
1319                 iter->flags |= FTRACE_ITER_CONT;
1320
1321
1322         file->f_pos += read;
1323
1324         ret = read;
1325  out:
1326         mutex_unlock(&ftrace_regex_lock);
1327
1328         return ret;
1329 }
1330
1331 static ssize_t
1332 ftrace_filter_write(struct file *file, const char __user *ubuf,
1333                     size_t cnt, loff_t *ppos)
1334 {
1335         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1336 }
1337
1338 static ssize_t
1339 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1340                      size_t cnt, loff_t *ppos)
1341 {
1342         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1343 }
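/*
 * Example usage from user space (a sketch; this assumes debugfs is
 * mounted at /sys/kernel/debug, some setups use /debug instead):
 *
 *	# echo 'sched*'   > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo 'schedule' > /sys/kernel/debug/tracing/set_ftrace_notrace
 *
 * Writes land in ftrace_regex_write() above: tokens are whitespace
 * separated, and each completed token is handed to ftrace_match().
 */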
1344
1345 static void
1346 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1347 {
1348         if (unlikely(ftrace_disabled))
1349                 return;
1350
1351         mutex_lock(&ftrace_regex_lock);
1352         if (reset)
1353                 ftrace_filter_reset(enable);
1354         if (buf)
1355                 ftrace_match(buf, len, enable);
1356         mutex_unlock(&ftrace_regex_lock);
1357 }
1358
1359 /**
1360  * ftrace_set_filter - set a function to filter on in ftrace
1361  * @buf - the string that holds the function filter text.
1362  * @len - the length of the string.
1363  * @reset - non zero to reset all filters before applying this filter.
1364  *
1365  * Filters denote which functions should be enabled when tracing is enabled.
1366  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1367  */
1368 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1369 {
1370         ftrace_set_regex(buf, len, reset, 1);
1371 }
1372
1373 /**
1374  * ftrace_set_notrace - set a function to not trace in ftrace
1375  * @buf - the string that holds the function notrace text.
1376  * @len - the length of the string.
1377  * @reset - non zero to reset all filters before applying this filter.
1378  *
1379  * Notrace Filters denote which functions should not be enabled when tracing
1380  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1381  * for tracing.
1382  */
1383 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1384 {
1385         ftrace_set_regex(buf, len, reset, 0);
1386 }
1387
1388 static int
1389 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1390 {
1391         struct seq_file *m = (struct seq_file *)file->private_data;
1392         struct ftrace_iterator *iter;
1393
1394         mutex_lock(&ftrace_regex_lock);
1395         if (file->f_mode & FMODE_READ) {
1396                 iter = m->private;
1397
1398                 seq_release(inode, file);
1399         } else
1400                 iter = file->private_data;
1401
1402         if (iter->buffer_idx) {
1403                 iter->filtered++;
1404                 iter->buffer[iter->buffer_idx] = 0;
1405                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1406         }
1407
1408         mutex_lock(&ftrace_sysctl_lock);
1409         mutex_lock(&ftraced_lock);
1410         if (iter->filtered && ftraced_suspend && ftrace_enabled)
1411                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1412         mutex_unlock(&ftraced_lock);
1413         mutex_unlock(&ftrace_sysctl_lock);
1414
1415         kfree(iter);
1416         mutex_unlock(&ftrace_regex_lock);
1417         return 0;
1418 }
1419
1420 static int
1421 ftrace_filter_release(struct inode *inode, struct file *file)
1422 {
1423         return ftrace_regex_release(inode, file, 1);
1424 }
1425
1426 static int
1427 ftrace_notrace_release(struct inode *inode, struct file *file)
1428 {
1429         return ftrace_regex_release(inode, file, 0);
1430 }
1431
1432 static ssize_t
1433 ftraced_read(struct file *filp, char __user *ubuf,
1434                      size_t cnt, loff_t *ppos)
1435 {
1436         /* don't worry about races */
1437         char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1438         int r = strlen(buf);
1439
1440         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1441 }
1442
1443 static ssize_t
1444 ftraced_write(struct file *filp, const char __user *ubuf,
1445                       size_t cnt, loff_t *ppos)
1446 {
1447         char buf[64];
1448         long val;
1449         int ret;
1450
1451         if (cnt >= sizeof(buf))
1452                 return -EINVAL;
1453
1454         if (copy_from_user(&buf, ubuf, cnt))
1455                 return -EFAULT;
1456
1457         if (strncmp(buf, "enable", 6) == 0)
1458                 val = 1;
1459         else if (strncmp(buf, "disable", 7) == 0)
1460                 val = 0;
1461         else {
1462                 buf[cnt] = 0;
1463
1464                 ret = strict_strtoul(buf, 10, &val);
1465                 if (ret < 0)
1466                         return ret;
1467
1468                 val = !!val;
1469         }
1470
1471         if (val)
1472                 ftrace_enable_daemon();
1473         else
1474                 ftrace_disable_daemon();
1475
1476         filp->f_pos += cnt;
1477
1478         return cnt;
1479 }
1480
1481 static struct file_operations ftrace_avail_fops = {
1482         .open = ftrace_avail_open,
1483         .read = seq_read,
1484         .llseek = seq_lseek,
1485         .release = ftrace_avail_release,
1486 };
1487
1488 static struct file_operations ftrace_failures_fops = {
1489         .open = ftrace_failures_open,
1490         .read = seq_read,
1491         .llseek = seq_lseek,
1492         .release = ftrace_avail_release,
1493 };
1494
1495 static struct file_operations ftrace_filter_fops = {
1496         .open = ftrace_filter_open,
1497         .read = ftrace_regex_read,
1498         .write = ftrace_filter_write,
1499         .llseek = ftrace_regex_lseek,
1500         .release = ftrace_filter_release,
1501 };
1502
1503 static struct file_operations ftrace_notrace_fops = {
1504         .open = ftrace_notrace_open,
1505         .read = ftrace_regex_read,
1506         .write = ftrace_notrace_write,
1507         .llseek = ftrace_regex_lseek,
1508         .release = ftrace_notrace_release,
1509 };
1510
1511 static struct file_operations ftraced_fops = {
1512         .open = tracing_open_generic,
1513         .read = ftraced_read,
1514         .write = ftraced_write,
1515 };
1516
1517 /**
1518  * ftrace_force_update - force an update to all recording ftrace functions
1519  */
1520 int ftrace_force_update(void)
1521 {
1522         int ret = 0;
1523
1524         if (unlikely(ftrace_disabled))
1525                 return -ENODEV;
1526
1527         mutex_lock(&ftrace_sysctl_lock);
1528         mutex_lock(&ftraced_lock);
1529
1530         /*
1531          * If ftraced_trigger is not set, then there is nothing
1532          * to update.
1533          */
1534         if (ftraced_trigger && !ftrace_update_code())
1535                 ret = -EBUSY;
1536
1537         mutex_unlock(&ftraced_lock);
1538         mutex_unlock(&ftrace_sysctl_lock);
1539
1540         return ret;
1541 }
1542
1543 static void ftrace_force_shutdown(void)
1544 {
1545         struct task_struct *task;
1546         int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1547
1548         mutex_lock(&ftraced_lock);
1549         task = ftraced_task;
1550         ftraced_task = NULL;
1551         ftraced_suspend = -1;
1552         ftrace_run_update_code(command);
1553         mutex_unlock(&ftraced_lock);
1554
1555         if (task)
1556                 kthread_stop(task);
1557 }
1558
1559 static __init int ftrace_init_debugfs(void)
1560 {
1561         struct dentry *d_tracer;
1562         struct dentry *entry;
1563
1564         d_tracer = tracing_init_dentry();
1565
1566         entry = debugfs_create_file("available_filter_functions", 0444,
1567                                     d_tracer, NULL, &ftrace_avail_fops);
1568         if (!entry)
1569                 pr_warning("Could not create debugfs "
1570                            "'available_filter_functions' entry\n");
1571
1572         entry = debugfs_create_file("failures", 0444,
1573                                     d_tracer, NULL, &ftrace_failures_fops);
1574         if (!entry)
1575                 pr_warning("Could not create debugfs 'failures' entry\n");
1576
1577         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1578                                     NULL, &ftrace_filter_fops);
1579         if (!entry)
1580                 pr_warning("Could not create debugfs "
1581                            "'set_ftrace_filter' entry\n");
1582
1583         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1584                                     NULL, &ftrace_notrace_fops);
1585         if (!entry)
1586                 pr_warning("Could not create debugfs "
1587                            "'set_ftrace_notrace' entry\n");
1588
1589         entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1590                                     NULL, &ftraced_fops);
1591         if (!entry)
1592                 pr_warning("Could not create debugfs "
1593                            "'ftraced_enabled' entry\n");
1594         return 0;
1595 }
1596
1597 fs_initcall(ftrace_init_debugfs);
1598
1599 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
1600 static int ftrace_convert_nops(unsigned long *start,
1601                                unsigned long *end)
1602 {
1603         unsigned long *p;
1604         unsigned long addr;
1605         unsigned long flags;
1606
1607         p = start;
1608         while (p < end) {
1609                 addr = ftrace_call_adjust(*p++);
1610                 /* should not be called from interrupt context */
1611                 spin_lock(&ftrace_lock);
1612                 ftrace_record_ip(addr);
1613                 spin_unlock(&ftrace_lock);
1614                 ftrace_shutdown_replenish();
1615         }
1616
1617         /* p is ignored */
1618         local_irq_save(flags);
1619         __ftrace_update_code(p);
1620         local_irq_restore(flags);
1621
1622         return 0;
1623 }
1624
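/*
 * Called at module load time with the module's __mcount_loc range;
 * every listed call site is recorded and patched to a NOP before the
 * module begins execution.
 */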
1625 void ftrace_init_module(unsigned long *start, unsigned long *end)
1626 {
1627         if (ftrace_disabled || start == end)
1628                 return;
1629         ftrace_convert_nops(start, end);
1630 }
1631
1632 extern unsigned long __start_mcount_loc[];
1633 extern unsigned long __stop_mcount_loc[];
1634
1635 void __init ftrace_init(void)
1636 {
1637         unsigned long count, addr, flags;
1638         int ret;
1639
1640         /* Keep the ftrace pointer to the stub */
1641         addr = (unsigned long)ftrace_stub;
1642
1643         local_irq_save(flags);
1644         ftrace_dyn_arch_init(&addr);
1645         local_irq_restore(flags);
1646
1647         /* ftrace_dyn_arch_init places the return code in addr */
1648         if (addr)
1649                 goto failed;
1650
1651         count = __stop_mcount_loc - __start_mcount_loc;
1652
1653         ret = ftrace_dyn_table_alloc(count);
1654         if (ret)
1655                 goto failed;
1656
1657         last_ftrace_enabled = ftrace_enabled = 1;
1658
1659         ret = ftrace_convert_nops(__start_mcount_loc,
1660                                   __stop_mcount_loc);
1661
1662         return;
1663  failed:
1664         ftrace_disabled = 1;
1665 }
1666 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
1667
1668 static void ftrace_release_hash(unsigned long start, unsigned long end)
1669 {
1670         struct dyn_ftrace *rec;
1671         struct hlist_node *t, *n;
1672         struct hlist_head *head, temp_list;
1673         unsigned long flags;
1674         int i, cpu;
1675
1676         preempt_disable_notrace();
1677
1678         /* disable in case we call something that calls mcount */
1679         cpu = raw_smp_processor_id();
1680         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
1681
1682         ftrace_hash_lock(flags);
1683
1684         for (i = 0; i < FTRACE_HASHSIZE; i++) {
1685                 INIT_HLIST_HEAD(&temp_list);
1686                 head = &ftrace_hash[i];
1687
1688                 /* the hash is locked; free any records in the released range */
1689                 hlist_for_each_entry_safe(rec, t, n, head, node) {
1690                         if (rec->flags & FTRACE_FL_FREE)
1691                                 continue;
1692
1693                         if ((rec->ip >= start) && (rec->ip < end))
1694                                 ftrace_free_rec(rec);
1695                 }
1696         }
1697
1698         ftrace_hash_unlock(flags);
1699
1700         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
1701         preempt_enable_notrace();
1702
1703 }
1704
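/*
 * The ftraced daemon: once a second, pick up any call sites recorded
 * by ftrace_record_ip() since the last pass and patch them to NOPs
 * via ftrace_update_code() (which uses stop_machine()). The check
 * below shuts ftrace down if the update volume looks implausibly
 * large.
 */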
1705 static int ftraced(void *ignore)
1706 {
1707         unsigned long usecs;
1708
1709         while (!kthread_should_stop()) {
1710
1711                 set_current_state(TASK_INTERRUPTIBLE);
1712
1713                 /* check once a second */
1714                 schedule_timeout(HZ);
1715
1716                 if (unlikely(ftrace_disabled))
1717                         continue;
1718
1719                 mutex_lock(&ftrace_sysctl_lock);
1720                 mutex_lock(&ftraced_lock);
1721                 if (!ftraced_suspend && !ftraced_stop &&
1722                     ftrace_update_code()) {
1723                         usecs = nsecs_to_usecs(ftrace_update_time);
1724                         if (ftrace_update_tot_cnt > 100000) {
1725                                 ftrace_update_tot_cnt = 0;
1726                                 pr_info("hm, dynamic ftrace overflow: %lu change%s"
1727                                         " (%lu total) in %lu usec%s\n",
1728                                         ftrace_update_cnt,
1729                                         ftrace_update_cnt != 1 ? "s" : "",
1730                                         ftrace_update_tot_cnt,
1731                                         usecs, usecs != 1 ? "s" : "");
1732                                 ftrace_disabled = 1;
1733                                 WARN_ON_ONCE(1);
1734                         }
1735                 }
1736                 mutex_unlock(&ftraced_lock);
1737                 mutex_unlock(&ftrace_sysctl_lock);
1738
1739                 ftrace_shutdown_replenish();
1740         }
1741         __set_current_state(TASK_RUNNING);
1742         return 0;
1743 }
1744
1745 static int __init ftrace_dynamic_init(void)
1746 {
1747         struct task_struct *p;
1748         unsigned long addr;
1749         int ret;
1750
1751         addr = (unsigned long)ftrace_record_ip;
1752
1753         stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1754
1755         /* ftrace_dyn_arch_init places the return code in addr */
1756         if (addr) {
1757                 ret = (int)addr;
1758                 goto failed;
1759         }
1760
1761         ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1762         if (ret)
1763                 goto failed;
1764
1765         p = kthread_run(ftraced, NULL, "ftraced");
1766         if (IS_ERR(p)) {
1767                 ret = -1;
1768                 goto failed;
1769         }
1770
1771         last_ftrace_enabled = ftrace_enabled = 1;
1772         ftraced_task = p;
1773
1774         return 0;
1775
1776  failed:
1777         ftrace_disabled = 1;
1778         return ret;
1779 }
1780
1781 core_initcall(ftrace_dynamic_init);
1782 #endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1783
1784 #else
1785 # define ftrace_startup()               do { } while (0)
1786 # define ftrace_shutdown()              do { } while (0)
1787 # define ftrace_startup_sysctl()        do { } while (0)
1788 # define ftrace_shutdown_sysctl()       do { } while (0)
1789 # define ftrace_force_shutdown()        do { } while (0)
1790 #endif /* CONFIG_DYNAMIC_FTRACE */
1791
1792 /**
1793  * ftrace_kill_atomic - kill ftrace from critical sections
1794  *
1795  * This function should be used by panic code. It stops ftrace
1796  * but in a not so nice way. If you need to simply kill ftrace
1797  * from a non-atomic section, use ftrace_kill.
1798  */
1799 void ftrace_kill_atomic(void)
1800 {
1801         ftrace_disabled = 1;
1802         ftrace_enabled = 0;
1803 #ifdef CONFIG_DYNAMIC_FTRACE
1804         ftraced_suspend = -1;
1805 #endif
1806         clear_ftrace_function();
1807 }
1808
1809 /**
1810  * ftrace_kill - totally shutdown ftrace
1811  *
1812  * This is a safety measure. If something was detected that seems
1813  * wrong, calling this function will keep ftrace from doing
1814  * any more modifications or updates. It is used when something
1815  * has gone wrong.
1816  */
1817 void ftrace_kill(void)
1818 {
1819         mutex_lock(&ftrace_sysctl_lock);
1820         ftrace_disabled = 1;
1821         ftrace_enabled = 0;
1822
1823         clear_ftrace_function();
1824         mutex_unlock(&ftrace_sysctl_lock);
1825
1826         /* Try to totally disable ftrace */
1827         ftrace_force_shutdown();
1828 }
1829
1830 /**
1831  * register_ftrace_function - register a function for profiling
1832  * @ops - ops structure that holds the function for profiling.
1833  *
1834  * Register a function to be called by all functions in the
1835  * kernel.
1836  *
1837  * Note: @ops->func and all the functions it calls must be labeled
1838  *       with "notrace", otherwise it will go into a
1839  *       recursive loop.
1840  */
1841 int register_ftrace_function(struct ftrace_ops *ops)
1842 {
1843         int ret;
1844
1845         if (unlikely(ftrace_disabled))
1846                 return -1;
1847
1848         mutex_lock(&ftrace_sysctl_lock);
1849         ret = __register_ftrace_function(ops);
1850         ftrace_startup();
1851         mutex_unlock(&ftrace_sysctl_lock);
1852
1853         return ret;
1854 }
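/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * the names my_trace_func and my_ops are illustrative only):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		... everything called from here must be notrace too ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */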
1855
1856 /**
1857  * unregister_ftrace_function - unregister a function for profiling.
1858  * @ops - ops structure that holds the function to unregister
1859  *
1860  * Unregister a function that was added to be called by ftrace profiling.
1861  */
1862 int unregister_ftrace_function(struct ftrace_ops *ops)
1863 {
1864         int ret;
1865
1866         mutex_lock(&ftrace_sysctl_lock);
1867         ret = __unregister_ftrace_function(ops);
1868         ftrace_shutdown();
1869         mutex_unlock(&ftrace_sysctl_lock);
1870
1871         return ret;
1872 }
1873
1874 int
1875 ftrace_enable_sysctl(struct ctl_table *table, int write,
1876                      struct file *file, void __user *buffer, size_t *lenp,
1877                      loff_t *ppos)
1878 {
1879         int ret;
1880
1881         if (unlikely(ftrace_disabled))
1882                 return -ENODEV;
1883
1884         mutex_lock(&ftrace_sysctl_lock);
1885
1886         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1887
1888         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1889                 goto out;
1890
1891         last_ftrace_enabled = ftrace_enabled;
1892
1893         if (ftrace_enabled) {
1894
1895                 ftrace_startup_sysctl();
1896
1897                 /* we are starting ftrace again */
1898                 if (ftrace_list != &ftrace_list_end) {
1899                         if (ftrace_list->next == &ftrace_list_end)
1900                                 ftrace_trace_function = ftrace_list->func;
1901                         else
1902                                 ftrace_trace_function = ftrace_list_func;
1903                 }
1904
1905         } else {
1906                 /* stopping ftrace calls (just send to ftrace_stub) */
1907                 ftrace_trace_function = ftrace_stub;
1908
1909                 ftrace_shutdown_sysctl();
1910         }
1911
1912  out:
1913         mutex_unlock(&ftrace_sysctl_lock);
1914         return ret;
1915 }