tracing/filters: a better event parser
[linux-2.6] kernel/trace/trace_syscalls.c
#include <trace/syscall.h>
#include <linux/kernel.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

/* Keep a counter of the syscall tracing users */
static int refcount;

/* Prevent races when toggling the thread flags */
static DEFINE_MUTEX(syscall_trace_lock);

/* Option to display the parameter types */
enum {
	TRACE_SYSCALLS_OPT_TYPES = 0x1,
};

static struct tracer_opt syscalls_opts[] = {
	{ TRACER_OPT(syscall_arg_type, TRACE_SYSCALLS_OPT_TYPES) },
	{ }
};

static struct tracer_flags syscalls_flags = {
	.val = 0, /* By default: don't display parameter types */
	.opts = syscalls_opts
};

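/*
 * For reference: the printers and probes below rely on per-syscall metadata
 * and on the enter/exit record layouts declared in <trace/syscall.h> and
 * "trace.h".  The sketch below is only a reminder reconstructed from how the
 * fields are used in this file; it is not compiled and the real declarations
 * may differ in detail.
 */
#if 0
struct syscall_metadata {
	const char	*name;		/* e.g. "sys_read" */
	int		nb_args;	/* number of parameters */
	const char	**types;	/* parameter type strings */
	const char	**args;		/* parameter name strings */
};

struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	unsigned long		args[];	/* nb_args argument values */
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;	/* syscall number */
	unsigned long		ret;	/* return value */
};
#endif
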
/* Pretty-print a syscall enter event: "name(arg: value, ...)" */
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace_assign_type(trace, ent);

	syscall = trace->nr;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		goto end;

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (syscalls_flags.val & TRACE_SYSCALLS_OPT_TYPES) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s ", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? ")" : ",");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

end:
	trace_seq_printf(s, "\n");
	return TRACE_TYPE_HANDLED;
}

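/*
 * syscall_nr_to_meta() and arch_init_ftrace_syscalls() are provided by the
 * architecture: the init builds a table of syscall_metadata indexed by
 * syscall number, and the lookup is a bounds-checked array access.  A
 * minimal sketch of the assumed arch side (the table name and the
 * FTRACE_SYSCALL_MAX bound are illustrative):
 */
#if 0
static struct syscall_metadata **syscalls_metadata;

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
#endif
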
/* Pretty-print a syscall exit event: "name -> return value" */
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace_assign_type(trace, ent);

	syscall = trace->nr;

	entry = syscall_nr_to_meta(syscall);
	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

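/*
 * With the "syscall" tracer enabled, the printers above produce lines like
 * (the argument names and values here are illustrative):
 *
 *	sys_read(fd: 3, buf: 7fffa3d2b0c0, count: 400)
 *	sys_read -> 0x400
 *
 * and with the syscall_arg_type option set, each value is preceded by its
 * type, e.g. "sys_read(unsigned int fd: 3, ...)".
 */
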
/* Set TIF_SYSCALL_FTRACE on every thread so the arch hooks call our probes */
void start_ftrace_syscalls(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	mutex_lock(&syscall_trace_lock);

	/* Don't enable the flag on the tasks twice */
	if (++refcount != 1)
		goto unlock;

	arch_init_ftrace_syscalls();
	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, t) {
		set_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
	} while_each_thread(g, t);

	read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
	mutex_unlock(&syscall_trace_lock);
}

/* Clear TIF_SYSCALL_FTRACE on every thread once the last user is gone */
void stop_ftrace_syscalls(void)
{
	unsigned long flags;
	struct task_struct *g, *t;

	mutex_lock(&syscall_trace_lock);

	/* There are perhaps still some users */
	if (--refcount)
		goto unlock;

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, t) {
		clear_tsk_thread_flag(t, TIF_SYSCALL_FTRACE);
	} while_each_thread(g, t);

	read_unlock_irqrestore(&tasklist_lock, flags);

unlock:
	mutex_unlock(&syscall_trace_lock);
}

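/*
 * The probes below are only reached for threads that have TIF_SYSCALL_FTRACE
 * set by start_ftrace_syscalls().  The call sites live in the architecture's
 * syscall tracing path; a rough sketch of what the arch hooks are assumed to
 * look like (the function names here are illustrative):
 */
#if 0
static void arch_syscall_trace_enter_hook(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_FTRACE))
		ftrace_syscall_enter(regs);
}

static void arch_syscall_trace_leave_hook(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_FTRACE))
		ftrace_syscall_exit(regs);
}
#endif
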
/* Probe: record a syscall enter event (number + arguments) in the ring buffer */
void ftrace_syscall_enter(struct pt_regs *regs)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* The record carries a variable number of argument values */
	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_ENTER, size,
							0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}

/* Probe: record a syscall exit event (number + return value) in the ring buffer */
void ftrace_syscall_exit(struct pt_regs *regs)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(TRACE_SYSCALL_EXIT,
				sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	trace_current_buffer_unlock_commit(event, 0, 0);
	trace_wake_up();
}

static int init_syscall_tracer(struct trace_array *tr)
{
	start_ftrace_syscalls();

	return 0;
}

static void reset_syscall_tracer(struct trace_array *tr)
{
	stop_ftrace_syscalls();
	tracing_reset_online_cpus(tr);
}

static struct trace_event syscall_enter_event = {
	.type		= TRACE_SYSCALL_ENTER,
	.trace		= print_syscall_enter,
};

static struct trace_event syscall_exit_event = {
	.type		= TRACE_SYSCALL_EXIT,
	.trace		= print_syscall_exit,
};

static struct tracer syscall_tracer __read_mostly = {
	.name		= "syscall",
	.init		= init_syscall_tracer,
	.reset		= reset_syscall_tracer,
	.flags		= &syscalls_flags,
};

/* Register the enter/exit output events and the "syscall" tracer at boot */
__init int register_ftrace_syscalls(void)
{
	int ret;

	/* register_ftrace_event() returns 0 on failure */
	ret = register_ftrace_event(&syscall_enter_event);
	if (!ret) {
		printk(KERN_WARNING "event %d failed to register\n",
		       syscall_enter_event.type);
		WARN_ON_ONCE(1);
	}

	ret = register_ftrace_event(&syscall_exit_event);
	if (!ret) {
		printk(KERN_WARNING "event %d failed to register\n",
		       syscall_exit_event.type);
		WARN_ON_ONCE(1);
	}

	return register_tracer(&syscall_tracer);
}
device_initcall(register_ftrace_syscalls);
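
/*
 * Typical usage, assuming the standard debugfs tracing interface is mounted
 * at /sys/kernel/debug/tracing:
 *
 *	echo syscall > current_tracer            # tracer name registered above
 *	echo syscall_arg_type > trace_options    # also show parameter types
 *	cat trace
 */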