tracing: clean up splice code
linux-2.6 / kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int                      ftrace_function_enabled;

static struct trace_array       *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}
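
/*
 * ftrace_preempt_disable()/ftrace_preempt_enable() above come from trace.h:
 * they remember whether NEED_RESCHED was already set and use the notrace
 * preempt variants, so the tracer callback itself never triggers a
 * reschedule (and so never recurses into the trace path).  A minimal
 * sketch, assuming the trace.h implementation of this era:
 *
 *        static inline int ftrace_preempt_disable(void)
 *        {
 *                int resched = need_resched();
 *
 *                preempt_disable_notrace();
 *                return resched;
 *        }
 *
 *        static inline void ftrace_preempt_enable(int resched)
 *        {
 *                if (resched)
 *                        preempt_enable_no_resched_notrace();
 *                else
 *                        preempt_enable_notrace();
 *        }
 */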

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}


static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};

/* Our two options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};
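
/*
 * TRACER_OPT(func_stack_trace, ...) exposes a per-tracer option while the
 * "function" tracer is active; toggling it lands in func_set_flag() below,
 * which swaps trace_ops for trace_stack_ops (and back).  A usage sketch,
 * assuming debugfs is mounted at /sys/kernel/debug:
 *
 *        # cd /sys/kernel/debug/tracing
 *        # echo function > current_tracer
 *        # echo func_stack_trace > trace_options
 *        # echo nofunc_stack_trace > trace_options
 */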

static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;
        /* OK if they are not registered */
        unregister_ftrace_function(&trace_stack_ops);
        unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
        return register_tracer(&function_trace);
}

device_initcall(init_function_trace);
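
/*
 * register_tracer() is what makes "function" selectable through the
 * current_tracer file; function_trace_init()/function_trace_reset() run as
 * the tracer is switched in and out.  A quick usage sketch, again assuming
 * debugfs is mounted at /sys/kernel/debug:
 *
 *        # echo function > /sys/kernel/debug/tracing/current_tracer
 *        # head /sys/kernel/debug/tracing/trace
 *        # echo nop > /sys/kernel/debug/tracing/current_tracer
 */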