kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES,
        .entries                = stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
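/*
 * Measure how deep the current task's stack is and, if it is the
 * deepest usage seen so far, record a stack trace of it along with
 * the stack depth at which each function in the trace was found.
 */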
static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. Some of the entries may for
         * some reason be missed on the stack, so we have to account
         * for that. If they are all there, this loop will only happen
         * once. This code only takes place on a new max, so it is far
         * from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                i++;
        }

 out:
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);
}
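/*
 * Callback invoked by the function tracer on (nearly) every function
 * entry. The per-cpu trace_active counter guards against recursion,
 * since check_stack() itself calls traced functions.
 */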
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu, resched;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        resched = ftrace_preempt_disable();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
};
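/*
 * debugfs read handler for "stack_max_size": report the largest
 * stack usage seen so far, in bytes.
 */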
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

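/*
 * debugfs write handler for "stack_max_size": lets the user set the
 * recorded maximum, typically by writing 0 to reset it.
 */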
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        unsigned long val, flags;
        char buf[64];
        int ret;

        if (count >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);
        *ptr = val;
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);

        return count;
}

static struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
};

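/*
 * seq_file iteration over the recorded stack trace. The current index
 * into stack_dump_trace[] is stashed in m->private; an index of -1
 * selects the table header.
 */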
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        long i = (long)m->private;

        (*pos)++;

        i++;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return NULL;

        m->private = (void *)i;

        return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = &m->private;
        loff_t l = 0;

        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        __raw_spin_unlock(&max_stack_lock);
        local_irq_enable();
}

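/*
 * Print one saved return address, resolved to a symbol name when
 * kallsyms is available.
 */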
static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, addr);

        return seq_printf(m, "%s\n", str);
#else
        return seq_printf(m, "%p\n", (void *)addr);
#endif
}

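/*
 * Emit one line of the "stack_trace" file. Depth is how far the
 * function's frame sits from the top of the stack; Size is the
 * difference from the next entry, i.e. that function's own stack
 * footprint.
 */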
static int t_show(struct seq_file *m, void *v)
{
        long i = *(long *)v;
        int size;

        if (i < 0) {
                seq_printf(m, "        Depth   Size      Location"
                           "    (%d entries)\n"
                           "        -----   ----      --------\n",
                           max_stack_trace.nr_entries);
                return 0;
        }

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &stack_trace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = (void *)-1;
        }

        return ret;
}

static struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
};

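/*
 * Create the debugfs control files and hook the stack tracer into the
 * function tracer.
 */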
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
                                    &max_stack_size, &stack_max_size_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_max_size' entry\n");

        entry = debugfs_create_file("stack_trace", 0444, d_tracer,
                                    NULL, &stack_trace_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_trace' entry\n");

        register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);