/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)                    \
        do {                                    \
                if (WARN_ON(cond))              \
                        ftrace_kill();          \
        } while (0)

#define FTRACE_WARN_ON_ONCE(cond)               \
        do {                                    \
                if (WARN_ON_ONCE(cond))         \
                        ftrace_kill();          \
        } while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
        if (!test_tsk_trace_trace(current))
                return;

        ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
        /* do not set ftrace_pid_function to itself! */
        if (func != ftrace_pid_func)
                ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between calling this and
 * the function tracer actually ceasing to be invoked.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
        __ftrace_trace_function = ftrace_stub;
        ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
        if (function_trace_stop)
                return;

        __ftrace_trace_function(ip, parent_ip);
}
#endif

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                ftrace_func_t func;

                if (ops->next == &ftrace_list_end)
                        func = ops->func;
                else
                        func = ftrace_list_func;

                if (ftrace_pid_trace) {
                        set_ftrace_pid_function(func);
                        func = ftrace_pid_func;
                }

                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                ftrace_trace_function = func;
#else
                __ftrace_trace_function = func;
                ftrace_trace_function = ftrace_test_stop_func;
#endif
        }

        return 0;
}
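
/*
 * Minimal usage sketch (illustrative, not part of this file): a tracer
 * supplies an ftrace_ops whose ->func is called from every traced
 * mcount site while the ops is registered:
 *
 *      static void my_trace_func(unsigned long ip, unsigned long parent_ip)
 *      {
 *              // ip is the traced function, parent_ip its caller
 *      }
 *      static struct ftrace_ops my_ops = { .func = my_trace_func };
 *
 *      __register_ftrace_function(&my_ops);
 *
 * With a single ops registered, ftrace_trace_function points at the
 * ops->func directly; with more than one, it points at ftrace_list_func,
 * which walks the chain.
 */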

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                return 0;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list->next == &ftrace_list_end) {
                        ftrace_func_t func = ftrace_list->func;

                        if (ftrace_pid_trace) {
                                set_ftrace_pid_function(func);
                                func = ftrace_pid_func;
                        }
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
                        ftrace_trace_function = func;
#else
                        __ftrace_trace_function = func;
#endif
                }
        }

        return 0;
}

static void ftrace_update_pid_func(void)
{
        ftrace_func_t func;

        if (ftrace_trace_function == ftrace_stub)
                return;

        func = ftrace_trace_function;

        if (ftrace_pid_trace) {
                set_ftrace_pid_function(func);
                func = ftrace_pid_func;
        } else {
                if (func == ftrace_pid_func)
                        func = ftrace_pid_function;
        }

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
        ftrace_trace_function = func;
#else
        __ftrace_trace_function = func;
#endif
}

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
        struct hlist_node       node;
        struct ftrace_probe_ops *ops;
        unsigned long           flags;
        unsigned long           ip;
        void                    *data;
        struct rcu_head         rcu;
};

enum {
        FTRACE_ENABLE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_ENABLE_MCOUNT            = (1 << 3),
        FTRACE_DISABLE_MCOUNT           = (1 << 4),
        FTRACE_START_FUNC_RET           = (1 << 5),
        FTRACE_STOP_FUNC_RET            = (1 << 6),
};
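
/*
 * These flags are ORed into a single command word, e.g.
 * FTRACE_ENABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC, which is passed to
 * ftrace_run_update_code() and decoded by __ftrace_modify_code()
 * while stop_machine() holds the other CPUs quiescent.
 */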

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
        struct ftrace_page      *next;
        int                     index;
        struct dyn_ftrace       records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
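
/*
 * Rough sizing (illustrative; both structs are arch and config
 * dependent): on 64-bit with 4K pages, if struct ftrace_page takes
 * 16 bytes and struct dyn_ftrace (ip + flags) takes 16 bytes, one
 * page holds (4096 - 16) / 16 = 255 records.
 */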

/* estimate from running different kernels */
#define NR_TO_INIT              10000

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a nested pair of for loops. Do not use 'break' to break
 * out of the loop; you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }
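
/*
 * Usage sketch (illustrative): count the records still in use.
 *
 *      struct ftrace_page *pg;
 *      struct dyn_ftrace *rec;
 *      int cnt = 0;
 *
 *      do_for_each_ftrace_rec(pg, rec) {
 *              if (!(rec->flags & FTRACE_FL_FREE))
 *                      cnt++;
 *      } while_for_each_ftrace_rec();
 */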

#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
        if (!(rec->flags & FTRACE_FL_FROZEN)) {
                rec->flags |= FTRACE_FL_FROZEN;
                frozen_record_count++;
        }
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_FROZEN) {
                rec->flags &= ~FTRACE_FL_FROZEN;
                frozen_record_count--;
        }
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
        return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)                     ({ 0; })
# define unfreeze_record(rec)                   ({ 0; })
# define record_frozen(rec)                     ({ 0; })
#endif /* CONFIG_KPROBES */

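/*
 * Freed records are threaded onto a singly linked free list by reusing
 * the ip field as the 'next' pointer; FTRACE_FL_FREE marks them so the
 * iterators skip them until ftrace_alloc_dyn_node() recycles them.
 */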
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        unsigned long s = (unsigned long)start;
        unsigned long e = s + size;

        if (ftrace_disabled || !start)
                return;

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {
                if ((rec->ip >= s) && (rec->ip < e) &&
                    !(rec->flags & FTRACE_FL_FREE))
                        ftrace_free_rec(rec);
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        FTRACE_WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next) {
                        /* allocate another page */
                        ftrace_pages->next =
                                (void *)get_zeroed_page(GFP_KERNEL);
                        if (!ftrace_pages->next)
                                return NULL;
                }
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *rec;

        if (ftrace_disabled)
                return NULL;

        rec = ftrace_alloc_dyn_node(ip);
        if (!rec)
                return NULL;

        rec->ip = ip;
        rec->flags = (unsigned long)ftrace_new_addrs;
        ftrace_new_addrs = rec;

        return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
        int i;

        printk(KERN_CONT "%s", fmt);

        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
                printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
        switch (failed) {
        case -EFAULT:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on modifying ");
                print_ip_sym(ip);
                break;
        case -EINVAL:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace failed to modify ");
                print_ip_sym(ip);
                print_ip_ins(" actual: ", (unsigned char *)ip);
                printk(KERN_CONT "\n");
                break;
        case -EPERM:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on writing ");
                print_ip_sym(ip);
                break;
        default:
                FTRACE_WARN_ON_ONCE(1);
                pr_info("ftrace faulted on unknown error ");
                print_ip_sym(ip);
        }
}

static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        unsigned long ip, fl;

        ftrace_addr = (unsigned long)FTRACE_ADDR;

        ip = rec->ip;

        /*
         * If this record is not to be traced and
         * it is not enabled then do nothing.
         *
         * If this record is not to be traced and
         * it is enabled then disable it.
         *
         */
        if (rec->flags & FTRACE_FL_NOTRACE) {
                if (rec->flags & FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                        return 0;

        } else if (ftrace_filtered && enable) {
                /*
                 * Filtering is on:
                 */

                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                /* Record is filtered and enabled, do nothing */
                if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
                        return 0;

                /* Record is not filtered or enabled, do nothing */
                if (!fl)
                        return 0;

                /* Record is not filtered but enabled, disable it */
                if (fl == FTRACE_FL_ENABLED)
                        rec->flags &= ~FTRACE_FL_ENABLED;
                else
                /* Otherwise record is filtered but not enabled, enable it */
                        rec->flags |= FTRACE_FL_ENABLED;
        } else {
                /* Disable or not filtered */

                if (enable) {
                        /* if record is enabled, do nothing */
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return 0;

                        rec->flags |= FTRACE_FL_ENABLED;

                } else {

                        /* if record is not enabled, do nothing */
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return 0;

                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        if (rec->flags & FTRACE_FL_ENABLED)
                return ftrace_make_call(rec, ftrace_addr);
        else
                return ftrace_make_nop(NULL, rec, ftrace_addr);
}

static void ftrace_replace_code(int enable)
{
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int failed;

        do_for_each_ftrace_rec(pg, rec) {
                /*
                 * Skip over free records and records that have
                 * failed or were never converted.
                 */
                if (rec->flags & FTRACE_FL_FREE ||
                    rec->flags & FTRACE_FL_FAILED ||
                    !(rec->flags & FTRACE_FL_CONVERTED))
                        continue;

                /* ignore updates to this record's mcount site */
                if (get_kprobe((void *)rec->ip)) {
                        freeze_record(rec);
                        continue;
                } else {
                        unfreeze_record(rec);
                }

                failed = __ftrace_replace_code(rec, enable);
                if (failed) {
                        rec->flags |= FTRACE_FL_FAILED;
                        if ((system_state == SYSTEM_BOOTING) ||
                            !core_kernel_text(rec->ip)) {
                                ftrace_free_rec(rec);
                        } else {
                                ftrace_bug(failed, rec->ip);
                                /* Stop processing */
                                return;
                        }
                }
        } while_for_each_ftrace_rec();
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
        unsigned long ip;
        int ret;

        ip = rec->ip;

        ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
        if (ret) {
                ftrace_bug(ret, ip);
                rec->flags |= FTRACE_FL_FAILED;
                return 0;
        }
        return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
        return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
        return 0;
}

static int __ftrace_modify_code(void *data)
{
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (*command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();

        return 0;
}

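/*
 * The command set is run under stop_machine() so that no other CPU is
 * executing (or fetching) an mcount call site while its instruction
 * bytes are being rewritten.
 */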
static void ftrace_run_update_code(int command)
{
        int ret;

        ret = ftrace_arch_code_modify_prepare();
        FTRACE_WARN_ON(ret);
        if (ret)
                return;

        stop_machine(__ftrace_modify_code, &command, NULL);

        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        ftrace_start_up++;
        command |= FTRACE_ENABLE_CALLS;

        ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
        if (unlikely(ftrace_disabled))
                return;

        ftrace_start_up--;
        if (!ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                return;

        ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
        if (ftrace_start_up)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        /* ftrace_start_up is true if ftrace is running */
        if (ftrace_start_up)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
}

static cycle_t          ftrace_update_time;
static unsigned long    ftrace_update_cnt;
unsigned long           ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
        struct dyn_ftrace *p;
        cycle_t start, stop;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        while (ftrace_new_addrs) {

                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
                        return -1;

                p = ftrace_new_addrs;
                ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
                p->flags = 0L;

                /* convert record (i.e., patch the mcount call with a NOP) */
                if (ftrace_code_disable(mod, p)) {
                        p->flags |= FTRACE_FL_CONVERTED;
                        ftrace_update_cnt++;
                } else
                        ftrace_free_rec(p);
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        return 0;
}

static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = num_to_init / ENTRIES_PER_PAGE;
        pr_info("ftrace: allocating %ld entries in %d pages\n",
                num_to_init, cnt + 1);

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER      = (1 << 0),
        FTRACE_ITER_CONT        = (1 << 1),
        FTRACE_ITER_NOTRACE     = (1 << 2),
        FTRACE_ITER_FAILURES    = (1 << 3),
        FTRACE_ITER_PRINTALL    = (1 << 4),
        FTRACE_ITER_HASH        = (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        struct ftrace_page      *pg;
        int                     hidx;
        int                     idx;
        unsigned                flags;
        unsigned char           buffer[FTRACE_BUFF_MAX+1];
        unsigned                buffer_idx;
        unsigned                filtered;
};

static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct hlist_node *hnd = v;
        struct hlist_head *hhd;

        WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

        (*pos)++;

 retry:
        if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
                return NULL;

        hhd = &ftrace_func_hash[iter->hidx];

        if (hlist_empty(hhd)) {
                iter->hidx++;
                hnd = NULL;
                goto retry;
        }

        if (!hnd)
                hnd = hhd->first;
        else {
                hnd = hnd->next;
                if (!hnd) {
                        iter->hidx++;
                        goto retry;
                }
        }

        return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;

        iter->flags |= FTRACE_ITER_HASH;

        return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
        struct ftrace_func_probe *rec;
        struct hlist_node *hnd = v;
        char str[KSYM_SYMBOL_LEN];

        rec = hlist_entry(hnd, struct ftrace_func_probe, node);

        if (rec->ops->print)
                return rec->ops->print(m, rec->ip, rec->ops, rec->data);

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "%s:", str);

        kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
        seq_printf(m, "%s", str);

        if (rec->data)
                seq_printf(m, ":%p", rec->data);
        seq_putc(m, '\n');

        return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_next(m, v, pos);

        (*pos)++;

        if (iter->flags & FTRACE_ITER_PRINTALL)
                return NULL;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                } else {
                        iter->idx = -1;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FREE) ||

                    (!(iter->flags & FTRACE_ITER_FAILURES) &&
                     (rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FAILURES) &&
                     !(rec->flags & FTRACE_FL_FAILED)) ||

                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER)) ||

                    ((iter->flags & FTRACE_ITER_NOTRACE) &&
                     !(rec->flags & FTRACE_FL_NOTRACE))) {
                        rec = NULL;
                        goto retry;
                }
        }

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;

        mutex_lock(&ftrace_lock);
        /*
         * For set_ftrace_filter reading, if we have the filter
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
        if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
                (*pos)++;
                return iter;
        }

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_start(m, pos);

        if (*pos > 0) {
                if (iter->idx < 0)
                        return p;
                (*pos)--;
                iter->idx--;
        }

        p = t_next(m, p, pos);

        if (!p)
                return t_hash_start(m, pos);

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (iter->flags & FTRACE_ITER_HASH)
                return t_hash_show(m, v);

        if (iter->flags & FTRACE_ITER_PRINTALL) {
                seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
        int ret;
        struct seq_file *m;
        struct ftrace_iterator *iter;

        ret = ftrace_avail_open(inode, file);
        if (!ret) {
                m = (struct seq_file *)file->private_data;
                iter = (struct ftrace_iterator *)m->private;
                iter->flags = FTRACE_ITER_FAILURES;
        }

        return ret;
}

static void ftrace_filter_reset(int enable)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

        mutex_lock(&ftrace_lock);
        if (enable)
                ftrace_filtered = 0;
        do_for_each_ftrace_rec(pg, rec) {
                if (rec->flags & FTRACE_FL_FAILED)
                        continue;
                rec->flags &= ~type;
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_regex_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset(enable);

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->flags = enable ? FTRACE_ITER_FILTER :
                        FTRACE_ITER_NOTRACE;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_regex_lock);

        return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
        int type = MATCH_FULL;
        int i;

        if (buff[0] == '!') {
                *not = 1;
                buff++;
                len--;
        } else
                *not = 0;

        *search = buff;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                *search = buff + 1;
                                type = MATCH_END_ONLY;
                        } else {
                                if (type == MATCH_END_ONLY)
                                        type = MATCH_MIDDLE_ONLY;
                                else
                                        type = MATCH_FRONT_ONLY;
                                buff[i] = 0;
                                break;
                        }
                }
        }

        return type;
}
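
/*
 * What ftrace_setup_glob() produces (illustrative):
 *
 *      "foo"   -> MATCH_FULL,        search = "foo"
 *      "foo*"  -> MATCH_FRONT_ONLY,  search = "foo"
 *      "*foo"  -> MATCH_END_ONLY,    search = "foo"
 *      "*foo*" -> MATCH_MIDDLE_ONLY, search = "foo"
 *      "!foo"  -> MATCH_FULL,        search = "foo", *not = 1
 */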

static int ftrace_match(char *str, char *regex, int len, int type)
{
        int matched = 0;
        char *ptr;

        switch (type) {
        case MATCH_FULL:
                if (strcmp(str, regex) == 0)
                        matched = 1;
                break;
        case MATCH_FRONT_ONLY:
                if (strncmp(str, regex, len) == 0)
                        matched = 1;
                break;
        case MATCH_MIDDLE_ONLY:
                if (strstr(str, regex))
                        matched = 1;
                break;
        case MATCH_END_ONLY:
                ptr = strstr(str, regex);
                if (ptr && (ptr[len] == 0))
                        matched = 1;
                break;
        }

        return matched;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
        unsigned int search_len;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned long flag;
        char *search;
        int type;
        int not;

        flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
        type = ftrace_setup_glob(buff, len, &search, &not);

        search_len = strlen(search);

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (ftrace_match_record(rec, search, search_len, type)) {
                        if (not)
                                rec->flags &= ~flag;
                        else
                                rec->flags |= flag;
                }
                /*
                 * Only enable filtering if we have a function that
                 * is filtered on.
                 */
                if (enable && (rec->flags & FTRACE_FL_FILTER))
                        ftrace_filtered = 1;
        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}

static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
                           char *regex, int len, int type)
{
        char str[KSYM_SYMBOL_LEN];
        char *modname;

        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

        if (!modname || strcmp(modname, mod))
                return 0;

        /* blank search means to match all funcs in the mod */
        if (len)
                return ftrace_match(str, regex, len, type);
        else
                return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
        unsigned search_len = 0;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        char *search = buff;
        unsigned long flag;
        int not = 0;

        flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

        /* blank or '*' mean the same */
        if (strcmp(buff, "*") == 0)
                buff[0] = 0;

        /* handle the case of 'dont filter this module' */
        if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
                buff[0] = 0;
                not = 1;
        }

        if (strlen(buff)) {
                type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
                search_len = strlen(search);
        }

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (ftrace_match_module_record(rec, mod,
                                               search, search_len, type)) {
                        if (not)
                                rec->flags &= ~flag;
                        else
                                rec->flags |= flag;
                }
                if (enable && (rec->flags & FTRACE_FL_FILTER))
                        ftrace_filtered = 1;

        } while_for_each_ftrace_rec();
        mutex_unlock(&ftrace_lock);
}

/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
        char *mod;

        /*
         * cmd == 'mod' because we only registered this func
         * for the 'mod' ftrace_func_command.
         * But if you register one func with multiple commands,
         * you can tell which command was used by the cmd
         * parameter.
         */

        /* we must have a module name */
        if (!param)
                return -EINVAL;

        mod = strsep(&param, ":");
        if (!strlen(mod))
                return -EINVAL;

        ftrace_match_module_records(func, mod, enable);
        return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
        .name                   = "mod",
        .func                   = ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
        return register_ftrace_command(&ftrace_mod_cmd);
}
device_initcall(ftrace_mod_cmd_init);
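
/*
 * Usage sketch (illustrative; the module name is only an example):
 * with the "mod" command registered, writing
 *
 *      *write*:mod:ext3
 *
 * to set_ftrace_filter restricts the filter to functions matching
 * *write* within the ext3 module.
 */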

static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_func_probe *entry;
        struct hlist_head *hhd;
        struct hlist_node *n;
        unsigned long key;
        int resched;

        key = hash_long(ip, FTRACE_HASH_BITS);

        hhd = &ftrace_func_hash[key];

        if (hlist_empty(hhd))
                return;

        /*
         * Disable preemption for these calls to prevent an RCU grace
         * period from passing. This syncs the hash iteration with the
         * freeing of items on the hash. rcu_read_lock is too dangerous here.
         */
        resched = ftrace_preempt_disable();
        hlist_for_each_entry_rcu(entry, n, hhd, node) {
                if (entry->ip == ip)
                        entry->ops->func(ip, parent_ip, &entry->data);
        }
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
        .func = function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
        int i;

        if (ftrace_probe_registered)
                return;

        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
                if (hhd->first)
                        break;
        }
        /* Nothing registered? */
        if (i == FTRACE_FUNC_HASHSIZE)
                return;

        __register_ftrace_function(&trace_probe_ops);
        ftrace_startup(0);
        ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
        int i;

        if (!ftrace_probe_registered)
                return;

        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
                if (hhd->first)
                        return;
        }

        /* no more funcs left */
        __unregister_ftrace_function(&trace_probe_ops);
        ftrace_shutdown(0);
        ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
        struct ftrace_func_probe *entry =
                container_of(rhp, struct ftrace_func_probe, rcu);

        if (entry->ops->free)
                entry->ops->free(&entry->data);
        kfree(entry);
}

int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                              void *data)
{
        struct ftrace_func_probe *entry;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type, len, not;
        unsigned long key;
        int count = 0;
        char *search;

        type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
        len = strlen(search);

        /* we do not support '!' for function probes */
        if (WARN_ON(not))
                return -EINVAL;

        mutex_lock(&ftrace_lock);
        do_for_each_ftrace_rec(pg, rec) {

                if (rec->flags & FTRACE_FL_FAILED)
                        continue;

                if (!ftrace_match_record(rec, search, len, type))
                        continue;

                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
                if (!entry) {
                        /* If we did not process any, then return error */
                        if (!count)
                                count = -ENOMEM;
                        goto out_unlock;
                }

                count++;

                entry->data = data;

                /*
                 * The caller might want to do something special
                 * for each function we find. We call the callback
                 * to give the caller an opportunity to do so.
                 */
                if (ops->callback) {
                        if (ops->callback(rec->ip, &entry->data) < 0) {
                                /* caller does not like this func */
                                kfree(entry);
                                continue;
                        }
                }

                entry->ops = ops;
                entry->ip = rec->ip;

                key = hash_long(entry->ip, FTRACE_HASH_BITS);
                hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

        } while_for_each_ftrace_rec();
        __enable_ftrace_function_probe();

 out_unlock:
        mutex_unlock(&ftrace_lock);

        return count;
}
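
/*
 * Usage sketch (illustrative; the names below are hypothetical):
 * attach a probe to every function whose name starts with "vfs_":
 *
 *      static void my_probe(unsigned long ip, unsigned long parent_ip,
 *                           void **data)
 *      {
 *              // runs each time a matched function is hit
 *      }
 *      static struct ftrace_probe_ops my_probe_ops = { .func = my_probe };
 *
 *      register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * The return value is the number of functions hooked, or a negative
 * errno if none could be set up.
 */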

enum {
        PROBE_TEST_FUNC         = 1,
        PROBE_TEST_DATA         = 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                  void *data, int flags)
{
        struct ftrace_func_probe *entry;
        struct hlist_node *n, *tmp;
        char str[KSYM_SYMBOL_LEN];
        int type = MATCH_FULL;
        int i, len = 0;
        char *search;

        if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
                glob = NULL;
        else {
                int not;

                type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
                len = strlen(search);

                /* we do not support '!' for function probes */
                if (WARN_ON(not))
                        return;
        }

        mutex_lock(&ftrace_lock);
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];

                hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

                        /* break up if statements for readability */
                        if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
                                continue;

                        if ((flags & PROBE_TEST_DATA) && entry->data != data)
                                continue;

                        /* do this last, since it is the most expensive */
                        if (glob) {
                                kallsyms_lookup(entry->ip, NULL, NULL,
                                                NULL, str);
                                if (!ftrace_match(str, glob, len, type))
                                        continue;
                        }

                        hlist_del(&entry->node);
                        call_rcu(&entry->rcu, ftrace_free_entry_rcu);
                }
        }
        __disable_ftrace_function_probe();
        mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                void *data)
{
        __unregister_ftrace_function_probe(glob, ops, data,
                                          PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
        __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
        __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
        struct ftrace_func_command *p;
        int ret = 0;

        mutex_lock(&ftrace_cmd_mutex);
        list_for_each_entry(p, &ftrace_commands, list) {
                if (strcmp(cmd->name, p->name) == 0) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
        }
        list_add(&cmd->list, &ftrace_commands);
 out_unlock:
        mutex_unlock(&ftrace_cmd_mutex);

        return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
        struct ftrace_func_command *p, *n;
        int ret = -ENODEV;

        mutex_lock(&ftrace_cmd_mutex);
        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
                if (strcmp(cmd->name, p->name) == 0) {
                        ret = 0;
                        list_del_init(&p->list);
                        goto out_unlock;
                }
        }
 out_unlock:
        mutex_unlock(&ftrace_cmd_mutex);

        return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
        char *func, *command, *next = buff;
        struct ftrace_func_command *p;
        int ret = -EINVAL;

        func = strsep(&next, ":");

        if (!next) {
                ftrace_match_records(func, len, enable);
                return 0;
        }

        /* command found */

        command = strsep(&next, ":");

        mutex_lock(&ftrace_cmd_mutex);
        list_for_each_entry(p, &ftrace_commands, list) {
                if (strcmp(p->name, command) == 0) {
                        ret = p->func(func, command, next, enable);
                        goto out_unlock;
                }
        }
 out_unlock:
        mutex_unlock(&ftrace_cmd_mutex);

        return ret;
}
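
/*
 * Input accepted above, colon separated (examples illustrative):
 *
 *      <func-glob>                     e.g. "sys_open" or "*read*"
 *      <func-glob>:<command>:<param>   e.g. "*read*:mod:nfs"
 *
 * A bare glob updates the filter/notrace flags directly; a command is
 * dispatched to the matching registered ftrace_func_command.
 */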

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos, int enable)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        /* cnt is a size_t and can never be negative */
        if (!cnt)
                return 0;
1688
1689         mutex_lock(&ftrace_regex_lock);
1690
1691         if (file->f_mode & FMODE_READ) {
1692                 struct seq_file *m = file->private_data;
1693                 iter = m->private;
1694         } else
1695                 iter = file->private_data;
1696
1697         if (!*ppos) {
1698                 iter->flags &= ~FTRACE_ITER_CONT;
1699                 iter->buffer_idx = 0;
1700         }
1701
1702         ret = get_user(ch, ubuf++);
1703         if (ret)
1704                 goto out;
1705         read++;
1706         cnt--;
1707
1708         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1709                 /* skip white space */
1710                 while (cnt && isspace(ch)) {
1711                         ret = get_user(ch, ubuf++);
1712                         if (ret)
1713                                 goto out;
1714                         read++;
1715                         cnt--;
1716                 }
1717
1718                 if (isspace(ch)) {
1719                         file->f_pos += read;
1720                         ret = read;
1721                         goto out;
1722                 }
1723
1724                 iter->buffer_idx = 0;
1725         }
1726
1727         while (cnt && !isspace(ch)) {
1728                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1729                         iter->buffer[iter->buffer_idx++] = ch;
1730                 else {
1731                         ret = -EINVAL;
1732                         goto out;
1733                 }
1734                 ret = get_user(ch, ubuf++);
1735                 if (ret)
1736                         goto out;
1737                 read++;
1738                 cnt--;
1739         }
1740
1741         if (isspace(ch)) {
1742                 iter->filtered++;
1743                 iter->buffer[iter->buffer_idx] = 0;
1744                 ret = ftrace_process_regex(iter->buffer,
1745                                            iter->buffer_idx, enable);
1746                 if (ret)
1747                         goto out;
1748                 iter->buffer_idx = 0;
1749         } else
1750                 iter->flags |= FTRACE_ITER_CONT;
1751
1752
1753         file->f_pos += read;
1754
1755         ret = read;
1756  out:
1757         mutex_unlock(&ftrace_regex_lock);
1758
1759         return ret;
1760 }
1761
1762 static ssize_t
1763 ftrace_filter_write(struct file *file, const char __user *ubuf,
1764                     size_t cnt, loff_t *ppos)
1765 {
1766         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1767 }
1768
1769 static ssize_t
1770 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1771                      size_t cnt, loff_t *ppos)
1772 {
1773         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1774 }
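
/*
 * Usage sketch (editor's addition): these write handlers back the
 * debugfs control files, so the filters are normally set from user
 * space, e.g.:
 *
 *	# echo 'sys_nanosleep' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	# echo '*spin_lock*' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */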
1775
1776 static void
1777 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1778 {
1779         if (unlikely(ftrace_disabled))
1780                 return;
1781
1782         mutex_lock(&ftrace_regex_lock);
1783         if (reset)
1784                 ftrace_filter_reset(enable);
1785         if (buf)
1786                 ftrace_match_records(buf, len, enable);
1787         mutex_unlock(&ftrace_regex_lock);
1788 }
1789
1790 /**
1791  * ftrace_set_filter - set a function to filter on in ftrace
1792  * @buf: the string that holds the function filter text.
1793  * @len: the length of the string.
1794  * @reset: non-zero to reset all filters before applying this filter.
1795  *
1796  * Filters denote which functions should be enabled when tracing is enabled.
1797  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1798  */
1799 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1800 {
1801         ftrace_set_regex(buf, len, reset, 1);
1802 }
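
/*
 * Example (editor's sketch, not from the original file): an in-kernel
 * user could restrict tracing to the scheduler before enabling a
 * tracer:
 *
 *	ftrace_set_filter((unsigned char *)"schedule", 8, 1);
 */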
1803
1804 /**
1805  * ftrace_set_notrace - set a function to not trace in ftrace
1806  * @buf: the string that holds the function notrace text.
1807  * @len: the length of the string.
1808  * @reset: non-zero to reset all filters before applying this filter.
1809  *
1810  * Notrace filters denote which functions should not be enabled when tracing
1811  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1812  * for tracing.
1813  */
1814 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1815 {
1816         ftrace_set_regex(buf, len, reset, 0);
1817 }
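
/*
 * Example (editor's sketch): trace everything except the locking
 * primitives, resetting any previous notrace filter first:
 *
 *	ftrace_set_notrace((unsigned char *)"*spin_lock*", 11, 1);
 */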
1818
1819 static int
1820 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1821 {
1822         struct seq_file *m = file->private_data;
1823         struct ftrace_iterator *iter;
1824
1825         mutex_lock(&ftrace_regex_lock);
1826         if (file->f_mode & FMODE_READ) {
1827                 iter = m->private;
1828
1829                 seq_release(inode, file);
1830         } else
1831                 iter = file->private_data;
1832
1833         if (iter->buffer_idx) {
1834                 iter->filtered++;
1835                 iter->buffer[iter->buffer_idx] = 0;
1836                 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
1837         }
1838
1839         mutex_lock(&ftrace_lock);
1840         if (ftrace_start_up && ftrace_enabled)
1841                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1842         mutex_unlock(&ftrace_lock);
1843
1844         kfree(iter);
1845         mutex_unlock(&ftrace_regex_lock);
1846         return 0;
1847 }
1848
1849 static int
1850 ftrace_filter_release(struct inode *inode, struct file *file)
1851 {
1852         return ftrace_regex_release(inode, file, 1);
1853 }
1854
1855 static int
1856 ftrace_notrace_release(struct inode *inode, struct file *file)
1857 {
1858         return ftrace_regex_release(inode, file, 0);
1859 }
1860
1861 static const struct file_operations ftrace_avail_fops = {
1862         .open = ftrace_avail_open,
1863         .read = seq_read,
1864         .llseek = seq_lseek,
1865         .release = ftrace_avail_release,
1866 };
1867
1868 static const struct file_operations ftrace_failures_fops = {
1869         .open = ftrace_failures_open,
1870         .read = seq_read,
1871         .llseek = seq_lseek,
1872         .release = ftrace_avail_release,
1873 };
1874
1875 static const struct file_operations ftrace_filter_fops = {
1876         .open = ftrace_filter_open,
1877         .read = seq_read,
1878         .write = ftrace_filter_write,
1879         .llseek = ftrace_regex_lseek,
1880         .release = ftrace_filter_release,
1881 };
1882
1883 static const struct file_operations ftrace_notrace_fops = {
1884         .open = ftrace_notrace_open,
1885         .read = seq_read,
1886         .write = ftrace_notrace_write,
1887         .llseek = ftrace_regex_lseek,
1888         .release = ftrace_notrace_release,
1889 };
1890
1891 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1892
1893 static DEFINE_MUTEX(graph_lock);
1894
1895 int ftrace_graph_count;
1896 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1897
1898 static void *
1899 g_next(struct seq_file *m, void *v, loff_t *pos)
1900 {
1901         unsigned long *array = m->private;
1902         int index = *pos;
1903
1904         (*pos)++;
1905
1906         if (index >= ftrace_graph_count)
1907                 return NULL;
1908
1909         return &array[index];
1910 }
1911
1912 static void *g_start(struct seq_file *m, loff_t *pos)
1913 {
1914         void *p = NULL;
1915
1916         mutex_lock(&graph_lock);
1917
1918         /* Nothing set; tell g_show to print "all functions enabled" */
1919         if (!ftrace_graph_count && !*pos)
1920                 return (void *)1;
1921
1922         p = g_next(m, p, pos);
1923
1924         return p;
1925 }
1926
1927 static void g_stop(struct seq_file *m, void *p)
1928 {
1929         mutex_unlock(&graph_lock);
1930 }
1931
1932 static int g_show(struct seq_file *m, void *v)
1933 {
1934         unsigned long *ptr = v;
1935         char str[KSYM_SYMBOL_LEN];
1936
1937         if (!ptr)
1938                 return 0;
1939
1940         if (ptr == (unsigned long *)1) {
1941                 seq_printf(m, "#### all functions enabled ####\n");
1942                 return 0;
1943         }
1944
1945         kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1946
1947         seq_printf(m, "%s\n", str);
1948
1949         return 0;
1950 }
1951
1952 static struct seq_operations ftrace_graph_seq_ops = {
1953         .start = g_start,
1954         .next = g_next,
1955         .stop = g_stop,
1956         .show = g_show,
1957 };
1958
1959 static int
1960 ftrace_graph_open(struct inode *inode, struct file *file)
1961 {
1962         int ret = 0;
1963
1964         if (unlikely(ftrace_disabled))
1965                 return -ENODEV;
1966
1967         mutex_lock(&graph_lock);
1968         if ((file->f_mode & FMODE_WRITE) &&
1969             !(file->f_flags & O_APPEND)) {
1970                 ftrace_graph_count = 0;
1971                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1972         }
1973
1974         if (file->f_mode & FMODE_READ) {
1975                 ret = seq_open(file, &ftrace_graph_seq_ops);
1976                 if (!ret) {
1977                         struct seq_file *m = file->private_data;
1978                         m->private = ftrace_graph_funcs;
1979                 }
1980         } else
1981                 file->private_data = ftrace_graph_funcs;
1982         mutex_unlock(&graph_lock);
1983
1984         return ret;
1985 }
1986
1987 static int
1988 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
1989 {
1990         struct dyn_ftrace *rec;
1991         struct ftrace_page *pg;
1992         int search_len;
1993         int found = 0;
1994         int type, not;
1995         char *search;
1996         bool exists;
1997         int i;
1998
1999         if (ftrace_disabled)
2000                 return -ENODEV;
2001
2002         /* decode the glob expression */
2003         type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
2004         if (not)
2005                 return -EINVAL;
2006
2007         search_len = strlen(search);
2008
2009         mutex_lock(&ftrace_lock);
2010         do_for_each_ftrace_rec(pg, rec) {
2011
2012                 if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
2013                         break;
2014
2015                 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
2016                         continue;
2017
2018                 if (ftrace_match_record(rec, search, search_len, type)) {
2019                         /* ensure it is not already in the array */
2020                         exists = false;
2021                         for (i = 0; i < *idx; i++)
2022                                 if (array[i] == rec->ip) {
2023                                         exists = true;
2024                                         break;
2025                                 }
2026                         if (!exists) {
2027                                 array[(*idx)++] = rec->ip;
2028                                 found = 1;
2029                         }
2030                 }
2031         } while_for_each_ftrace_rec();
2032
2033         mutex_unlock(&ftrace_lock);
2034
2035         return found ? 0 : -EINVAL;
2036 }
2037
2038 static ssize_t
2039 ftrace_graph_write(struct file *file, const char __user *ubuf,
2040                    size_t cnt, loff_t *ppos)
2041 {
2042         unsigned char buffer[FTRACE_BUFF_MAX+1];
2043         unsigned long *array;
2044         size_t read = 0;
2045         ssize_t ret;
2046         int index = 0;
2047         char ch;
2048
2049         if (!cnt)
2050                 return 0;
2051
2052         mutex_lock(&graph_lock);
2053
2054         if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2055                 ret = -EBUSY;
2056                 goto out;
2057         }
2058
2059         if (file->f_mode & FMODE_READ) {
2060                 struct seq_file *m = file->private_data;
2061                 array = m->private;
2062         } else
2063                 array = file->private_data;
2064
2065         ret = get_user(ch, ubuf++);
2066         if (ret)
2067                 goto out;
2068         read++;
2069         cnt--;
2070
2071         /* skip white space */
2072         while (cnt && isspace(ch)) {
2073                 ret = get_user(ch, ubuf++);
2074                 if (ret)
2075                         goto out;
2076                 read++;
2077                 cnt--;
2078         }
2079
2080         if (isspace(ch)) {
2081                 *ppos += read;
2082                 ret = read;
2083                 goto out;
2084         }
2085
2086         while (cnt && !isspace(ch)) {
2087                 if (index < FTRACE_BUFF_MAX)
2088                         buffer[index++] = ch;
2089                 else {
2090                         ret = -EINVAL;
2091                         goto out;
2092                 }
2093                 ret = get_user(ch, ubuf++);
2094                 if (ret)
2095                         goto out;
2096                 read++;
2097                 cnt--;
2098         }
2099         buffer[index] = 0;
2100
2101         /* we allow only one expression at a time */
2102         ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
2103         if (ret)
2104                 goto out;
2105
2106         *ppos += read;
2107
2108         ret = read;
2109  out:
2110         mutex_unlock(&graph_lock);
2111
2112         return ret;
2113 }
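
/*
 * Usage sketch (editor's addition): only one expression is accepted per
 * write, so limiting the graph tracer to sys_read and its children
 * looks like:
 *
 *	# echo sys_read > /sys/kernel/debug/tracing/set_graph_function
 */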
2114
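/*
 * Editor's fix (a minimal sketch of the missing release method): without
 * it, the seq_file allocated by ftrace_graph_open() for readers is
 * leaked on close. Write-only opens store ftrace_graph_funcs directly
 * in private_data, so only the read side needs seq_release().
 */
static int
ftrace_graph_release(struct inode *inode, struct file *file)
{
        if (file->f_mode & FMODE_READ)
                seq_release(inode, file);
        return 0;
}
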
2115 static const struct file_operations ftrace_graph_fops = {
2116         .open = ftrace_graph_open,
2117         .read = seq_read,
2118         .write = ftrace_graph_write,
2119         .release = ftrace_graph_release,
2120 };
2120 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2121
2122 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2123 {
2124         struct dentry *entry;
2125
2126         entry = debugfs_create_file("available_filter_functions", 0444,
2127                                     d_tracer, NULL, &ftrace_avail_fops);
2128         if (!entry)
2129                 pr_warning("Could not create debugfs "
2130                            "'available_filter_functions' entry\n");
2131
2132         entry = debugfs_create_file("failures", 0444,
2133                                     d_tracer, NULL, &ftrace_failures_fops);
2134         if (!entry)
2135                 pr_warning("Could not create debugfs 'failures' entry\n");
2136
2137         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2138                                     NULL, &ftrace_filter_fops);
2139         if (!entry)
2140                 pr_warning("Could not create debugfs "
2141                            "'set_ftrace_filter' entry\n");
2142
2143         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2144                                     NULL, &ftrace_notrace_fops);
2145         if (!entry)
2146                 pr_warning("Could not create debugfs "
2147                            "'set_ftrace_notrace' entry\n");
2148
2149 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2150         entry = debugfs_create_file("set_graph_function", 0644, d_tracer,
2151                                     NULL,
2152                                     &ftrace_graph_fops);
2153         if (!entry)
2154                 pr_warning("Could not create debugfs "
2155                            "'set_graph_function' entry\n");
2156 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2157
2158         return 0;
2159 }
2160
2161 static int ftrace_convert_nops(struct module *mod,
2162                                unsigned long *start,
2163                                unsigned long *end)
2164 {
2165         unsigned long *p;
2166         unsigned long addr;
2167         unsigned long flags;
2168
2169         mutex_lock(&ftrace_lock);
2170         p = start;
2171         while (p < end) {
2172                 addr = ftrace_call_adjust(*p++);
2173                 /*
2174                  * Some architecture linkers will pad between
2175                  * the different mcount_loc sections of different
2176                  * object files to satisfy alignments.
2177                  * Skip any NULL pointers.
2178                  */
2179                 if (!addr)
2180                         continue;
2181                 ftrace_record_ip(addr);
2182         }
2183
2184         /* disable interrupts to prevent kstop_machine */
2185         local_irq_save(flags);
2186         ftrace_update_code(mod);
2187         local_irq_restore(flags);
2188         mutex_unlock(&ftrace_lock);
2189
2190         return 0;
2191 }
2192
2193 void ftrace_init_module(struct module *mod,
2194                         unsigned long *start, unsigned long *end)
2195 {
2196         if (ftrace_disabled || start == end)
2197                 return;
2198         ftrace_convert_nops(mod, start, end);
2199 }
2200
2201 extern unsigned long __start_mcount_loc[];
2202 extern unsigned long __stop_mcount_loc[];
2203
2204 void __init ftrace_init(void)
2205 {
2206         unsigned long count, addr, flags;
2207         int ret;
2208
2209         /* Keep the ftrace pointer to the stub */
2210         addr = (unsigned long)ftrace_stub;
2211
2212         local_irq_save(flags);
2213         ftrace_dyn_arch_init(&addr);
2214         local_irq_restore(flags);
2215
2216         /* ftrace_dyn_arch_init places the return code in addr */
2217         if (addr)
2218                 goto failed;
2219
2220         count = __stop_mcount_loc - __start_mcount_loc;
2221
2222         ret = ftrace_dyn_table_alloc(count);
2223         if (ret)
2224                 goto failed;
2225
2226         last_ftrace_enabled = ftrace_enabled = 1;
2227
2228         ret = ftrace_convert_nops(NULL,
2229                                   __start_mcount_loc,
2230                                   __stop_mcount_loc);
2231
2232         return;
2233  failed:
2234         ftrace_disabled = 1;
2235 }
2236
2237 #else
2238
2239 static int __init ftrace_nodyn_init(void)
2240 {
2241         ftrace_enabled = 1;
2242         return 0;
2243 }
2244 device_initcall(ftrace_nodyn_init);
2245
2246 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
2247 static inline void ftrace_startup_enable(int command) { }
2248 /* Keep as macros so we do not need to define the commands */
2249 # define ftrace_startup(command)        do { } while (0)
2250 # define ftrace_shutdown(command)       do { } while (0)
2251 # define ftrace_startup_sysctl()        do { } while (0)
2252 # define ftrace_shutdown_sysctl()       do { } while (0)
2253 #endif /* CONFIG_DYNAMIC_FTRACE */
2254
2255 static ssize_t
2256 ftrace_pid_read(struct file *file, char __user *ubuf,
2257                        size_t cnt, loff_t *ppos)
2258 {
2259         char buf[64];
2260         int r;
2261
2262         if (ftrace_pid_trace == ftrace_swapper_pid)
2263                 r = sprintf(buf, "swapper tasks\n");
2264         else if (ftrace_pid_trace)
2265                 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
2266         else
2267                 r = sprintf(buf, "no pid\n");
2268
2269         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2270 }
2271
2272 static void clear_ftrace_swapper(void)
2273 {
2274         struct task_struct *p;
2275         int cpu;
2276
2277         get_online_cpus();
2278         for_each_online_cpu(cpu) {
2279                 p = idle_task(cpu);
2280                 clear_tsk_trace_trace(p);
2281         }
2282         put_online_cpus();
2283 }
2284
2285 static void set_ftrace_swapper(void)
2286 {
2287         struct task_struct *p;
2288         int cpu;
2289
2290         get_online_cpus();
2291         for_each_online_cpu(cpu) {
2292                 p = idle_task(cpu);
2293                 set_tsk_trace_trace(p);
2294         }
2295         put_online_cpus();
2296 }
2297
2298 static void clear_ftrace_pid(struct pid *pid)
2299 {
2300         struct task_struct *p;
2301
2302         rcu_read_lock();
2303         do_each_pid_task(pid, PIDTYPE_PID, p) {
2304                 clear_tsk_trace_trace(p);
2305         } while_each_pid_task(pid, PIDTYPE_PID, p);
2306         rcu_read_unlock();
2307
2308         put_pid(pid);
2309 }
2310
2311 static void set_ftrace_pid(struct pid *pid)
2312 {
2313         struct task_struct *p;
2314
2315         rcu_read_lock();
2316         do_each_pid_task(pid, PIDTYPE_PID, p) {
2317                 set_tsk_trace_trace(p);
2318         } while_each_pid_task(pid, PIDTYPE_PID, p);
2319         rcu_read_unlock();
2320 }
2321
2322 static void clear_ftrace_pid_task(struct pid **pid)
2323 {
2324         if (*pid == ftrace_swapper_pid)
2325                 clear_ftrace_swapper();
2326         else
2327                 clear_ftrace_pid(*pid);
2328
2329         *pid = NULL;
2330 }
2331
2332 static void set_ftrace_pid_task(struct pid *pid)
2333 {
2334         if (pid == ftrace_swapper_pid)
2335                 set_ftrace_swapper();
2336         else
2337                 set_ftrace_pid(pid);
2338 }
2339
2340 static ssize_t
2341 ftrace_pid_write(struct file *filp, const char __user *ubuf,
2342                    size_t cnt, loff_t *ppos)
2343 {
2344         struct pid *pid;
2345         char buf[64];
2346         long val;
2347         int ret;
2348
2349         if (cnt >= sizeof(buf))
2350                 return -EINVAL;
2351
2352         if (copy_from_user(&buf, ubuf, cnt))
2353                 return -EFAULT;
2354
2355         buf[cnt] = 0;
2356
2357         ret = strict_strtol(buf, 10, &val);
2358         if (ret < 0)
2359                 return ret;
2360
2361         mutex_lock(&ftrace_lock);
2362         if (val < 0) {
2363                 /* disable pid tracing */
2364                 if (!ftrace_pid_trace)
2365                         goto out;
2366
2367                 clear_ftrace_pid_task(&ftrace_pid_trace);
2368
2369         } else {
2370                 /* swapper task is special */
2371                 if (!val) {
2372                         pid = ftrace_swapper_pid;
2373                         if (pid == ftrace_pid_trace)
2374                                 goto out;
2375                 } else {
2376                         pid = find_get_pid(val);
2377
2378                         if (pid == ftrace_pid_trace) {
2379                                 put_pid(pid);
2380                                 goto out;
2381                         }
2382                 }
2383
2384                 if (ftrace_pid_trace)
2385                         clear_ftrace_pid_task(&ftrace_pid_trace);
2386
2387                 if (!pid)
2388                         goto out;
2389
2390                 ftrace_pid_trace = pid;
2391
2392                 set_ftrace_pid_task(ftrace_pid_trace);
2393         }
2394
2395         /* update the function call */
2396         ftrace_update_pid_func();
2397         ftrace_startup_enable(0);
2398
2399  out:
2400         mutex_unlock(&ftrace_lock);
2401
2402         return cnt;
2403 }
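
/*
 * Usage sketch (editor's addition), matching the parsing above: a
 * positive value selects one pid, 0 selects the idle (swapper) tasks,
 * and a negative value disables pid filtering:
 *
 *	# echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	# echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid
 */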
2404
2405 static const struct file_operations ftrace_pid_fops = {
2406         .read = ftrace_pid_read,
2407         .write = ftrace_pid_write,
2408 };
2409
2410 static __init int ftrace_init_debugfs(void)
2411 {
2412         struct dentry *d_tracer;
2413         struct dentry *entry;
2414
2415         d_tracer = tracing_init_dentry();
2416         if (!d_tracer)
2417                 return 0;
2418
2419         ftrace_init_dyn_debugfs(d_tracer);
2420
2421         entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2422                                     NULL, &ftrace_pid_fops);
2423         if (!entry)
2424                 pr_warning("Could not create debugfs "
2425                            "'set_ftrace_pid' entry\n");
2426         return 0;
2427 }
2428 fs_initcall(ftrace_init_debugfs);
2429
2430 /**
2431  * ftrace_kill - kill ftrace
2432  *
2433  * This function should be used by panic code. It stops ftrace
2434  * but in a not so nice way: tracing is shut off on the spot, nothing
2435  * is cleaned up, and it cannot be re-enabled afterwards.
2436  */
2437 void ftrace_kill(void)
2438 {
2439         ftrace_disabled = 1;
2440         ftrace_enabled = 0;
2441         clear_ftrace_function();
2442 }
2443
2444 /**
2445  * register_ftrace_function - register a function for profiling
2446  * @ops: ops structure that holds the function for profiling.
2447  *
2448  * Register a function to be called on entry to every traced
2449  * function in the kernel.
2450  *
2451  * Note: @ops->func and all the functions it calls must be labeled
2452  *       with "notrace", otherwise it will go into a
2453  *       recursive loop.
2454  */
2455 int register_ftrace_function(struct ftrace_ops *ops)
2456 {
2457         int ret;
2458
2459         if (unlikely(ftrace_disabled))
2460                 return -ENODEV;
2461
2462         mutex_lock(&ftrace_lock);
2463
2464         ret = __register_ftrace_function(ops);
2465         ftrace_startup(0);
2466
2467         mutex_unlock(&ftrace_lock);
2468         return ret;
2469 }
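
/*
 * Example (editor's sketch of a hypothetical caller, not part of the
 * original file):
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip)
 *	{
 *		... runs on every traced function; must not recurse ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */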
2470
2471 /**
2472  * unregister_ftrace_function - unregister a function for profiling.
2473  * @ops: ops structure that holds the function to unregister
2474  *
2475  * Unregister a function that was added to be called by ftrace profiling.
2476  */
2477 int unregister_ftrace_function(struct ftrace_ops *ops)
2478 {
2479         int ret;
2480
2481         mutex_lock(&ftrace_lock);
2482         ret = __unregister_ftrace_function(ops);
2483         ftrace_shutdown(0);
2484         mutex_unlock(&ftrace_lock);
2485
2486         return ret;
2487 }
2488
2489 int
2490 ftrace_enable_sysctl(struct ctl_table *table, int write,
2491                      struct file *file, void __user *buffer, size_t *lenp,
2492                      loff_t *ppos)
2493 {
2494         int ret;
2495
2496         if (unlikely(ftrace_disabled))
2497                 return -ENODEV;
2498
2499         mutex_lock(&ftrace_lock);
2500
2501         ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
2502
2503         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
2504                 goto out;
2505
2506         last_ftrace_enabled = ftrace_enabled;
2507
2508         if (ftrace_enabled) {
2509
2510                 ftrace_startup_sysctl();
2511
2512                 /* we are starting ftrace again */
2513                 if (ftrace_list != &ftrace_list_end) {
2514                         if (ftrace_list->next == &ftrace_list_end)
2515                                 ftrace_trace_function = ftrace_list->func;
2516                         else
2517                                 ftrace_trace_function = ftrace_list_func;
2518                 }
2519
2520         } else {
2521                 /* stopping ftrace calls (just send to ftrace_stub) */
2522                 ftrace_trace_function = ftrace_stub;
2523
2524                 ftrace_shutdown_sysctl();
2525         }
2526
2527  out:
2528         mutex_unlock(&ftrace_lock);
2529         return ret;
2530 }
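
/*
 * Usage sketch (editor's addition): this handler backs the
 * kernel.ftrace_enabled sysctl:
 *
 *	# echo 0 > /proc/sys/kernel/ftrace_enabled
 *	# sysctl kernel.ftrace_enabled=1
 */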
2531
2532 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2533
2534 static atomic_t ftrace_graph_active;
2535 static struct notifier_block ftrace_suspend_notifier;
2536
2537 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
2538 {
2539         return 0;
2540 }
2541
2542 /* The callbacks that hook a function */
2543 trace_func_graph_ret_t ftrace_graph_return =
2544                         (trace_func_graph_ret_t)ftrace_stub;
2545 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
2546
2547 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks per pass. */
2548 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
2549 {
2550         int i;
2551         int ret = 0;
2552         unsigned long flags;
2553         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
2554         struct task_struct *g, *t;
2555
2556         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
2557                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
2558                                         * sizeof(struct ftrace_ret_stack),
2559                                         GFP_KERNEL);
2560                 if (!ret_stack_list[i]) {
2561                         start = 0;
2562                         end = i;
2563                         ret = -ENOMEM;
2564                         goto free;
2565                 }
2566         }
2567
2568         read_lock_irqsave(&tasklist_lock, flags);
2569         do_each_thread(g, t) {
2570                 if (start == end) {
2571                         ret = -EAGAIN;
2572                         goto unlock;
2573                 }
2574
2575                 if (t->ret_stack == NULL) {
2576                         t->curr_ret_stack = -1;
2577                         /* Make sure IRQs see the -1 first: */
2578                         barrier();
2579                         t->ret_stack = ret_stack_list[start++];
2580                         atomic_set(&t->tracing_graph_pause, 0);
2581                         atomic_set(&t->trace_overrun, 0);
2582                 }
2583         } while_each_thread(g, t);
2584
2585 unlock:
2586         read_unlock_irqrestore(&tasklist_lock, flags);
2587 free:
2588         for (i = start; i < end; i++)
2589                 kfree(ret_stack_list[i]);
2590         return ret;
2591 }
2592
2593 /* Allocate a return stack for each task */
2594 static int start_graph_tracing(void)
2595 {
2596         struct ftrace_ret_stack **ret_stack_list;
2597         int ret, cpu;
2598
2599         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
2600                                 sizeof(struct ftrace_ret_stack *),
2601                                 GFP_KERNEL);
2602
2603         if (!ret_stack_list)
2604                 return -ENOMEM;
2605
2606         /* The ret_stack of each CPU's idle task will never be freed */
2607         for_each_online_cpu(cpu)
2608                 ftrace_graph_init_task(idle_task(cpu));
2609
2610         do {
2611                 ret = alloc_retstack_tasklist(ret_stack_list);
2612         } while (ret == -EAGAIN);
2613
2614         kfree(ret_stack_list);
2615         return ret;
2616 }
2617
2618 /*
2619  * Hibernation protection.
2620  * The state of the current task is too unstable during
2621  * suspend/restore to disk. We want to protect against that.
2622  */
2623 static int
2624 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2625                                                         void *unused)
2626 {
2627         switch (state) {
2628         case PM_HIBERNATION_PREPARE:
2629                 pause_graph_tracing();
2630                 break;
2631
2632         case PM_POST_HIBERNATION:
2633                 unpause_graph_tracing();
2634                 break;
2635         }
2636         return NOTIFY_DONE;
2637 }
2638
2639 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2640                         trace_func_graph_ent_t entryfunc)
2641 {
2642         int ret = 0;
2643
2644         mutex_lock(&ftrace_lock);
2645
2646         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2647         register_pm_notifier(&ftrace_suspend_notifier);
2648
2649         atomic_inc(&ftrace_graph_active);
2650         ret = start_graph_tracing();
2651         if (ret) {
2652                 atomic_dec(&ftrace_graph_active);
2653                 goto out;
2654         }
2655
2656         ftrace_graph_return = retfunc;
2657         ftrace_graph_entry = entryfunc;
2658
2659         ftrace_startup(FTRACE_START_FUNC_RET);
2660
2661 out:
2662         mutex_unlock(&ftrace_lock);
2663         return ret;
2664 }
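
/*
 * Example (editor's sketch): my_entry/my_return are hypothetical
 * callbacks hooking the entry and return of every traced function:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	... nonzero means: trace this function ...
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_return, my_entry);
 */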
2665
2666 void unregister_ftrace_graph(void)
2667 {
2668         mutex_lock(&ftrace_lock);
2669
2670         atomic_dec(&ftrace_graph_active);
2671         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2672         ftrace_graph_entry = ftrace_graph_entry_stub;
2673         ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2674         unregister_pm_notifier(&ftrace_suspend_notifier);
2675
2676         mutex_unlock(&ftrace_lock);
2677 }
2678
2679 /* Allocate a return stack for newly created task */
2680 void ftrace_graph_init_task(struct task_struct *t)
2681 {
2682         if (atomic_read(&ftrace_graph_active)) {
2683                 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
2684                                 * sizeof(struct ftrace_ret_stack),
2685                                 GFP_KERNEL);
2686                 if (!t->ret_stack)
2687                         return;
2688                 t->curr_ret_stack = -1;
2689                 atomic_set(&t->tracing_graph_pause, 0);
2690                 atomic_set(&t->trace_overrun, 0);
2691         } else
2692                 t->ret_stack = NULL;
2693 }
2694
2695 void ftrace_graph_exit_task(struct task_struct *t)
2696 {
2697         struct ftrace_ret_stack *ret_stack = t->ret_stack;
2698
2699         t->ret_stack = NULL;
2700         /* NULL must become visible to IRQs before we free it: */
2701         barrier();
2702
2703         kfree(ret_stack);
2704 }
2705
2706 void ftrace_graph_stop(void)
2707 {
2708         ftrace_stop();
2709 }
2710 #endif
2711