/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

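/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far; stack_dump_index[i] records how deep the stack was, in
 * bytes from the top of the stack, where entry i was found, so the
 * difference between consecutive entries is the size of each frame.
 */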
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
        .max_entries            = STACK_TRACE_ENTRIES,
        .entries                = stack_dump_trace,
};

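/*
 * Deepest stack size seen so far, protected (along with the arrays
 * above) by max_stack_lock.  The lock is always taken with interrupts
 * disabled, since the tracer callback can fire in any context.
 */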
static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);

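/*
 * Measure the current stack depth from the address of a local
 * variable.  If it exceeds the deepest stack seen so far, take a
 * stack trace (skip = 3 leaves out the tracer's own innermost
 * frames) and record how deep the stack was at each entry.
 */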
static inline void check_stack(void)
{
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
        int i;

        this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(&this_size))
                return;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries      = 0;
        max_stack_trace.skip            = 3;

        save_stack_trace(&max_stack_trace);

        /*
         * Now find where in the stack these are.
         */
        i = 0;
        start = &this_size;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. Some entries may for some
         * reason not be found on the stack, so we have to account
         * for that. If they are all found, this loop will only run
         * once. This code only runs on a new max, so it is far from
         * a fast path.
         */
        while (i < max_stack_trace.nr_entries) {

                stack_dump_index[i] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (*p == stack_dump_trace[i]) {
                                this_size = stack_dump_index[i++] =
                                        (top - p) * sizeof(unsigned long);
                                /* Start the search from here */
                                start = p + 1;
                        }
                }

                i++;
        }

 out:
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);
}

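/*
 * Callback registered with ftrace: runs on every traced function
 * entry.  The per-cpu trace_active counter guards against recursion,
 * since check_stack() itself calls functions that may be traced.
 */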
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        int cpu, resched;

        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;

        resched = need_resched();
        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable on this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        check_stack();

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
};

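/* debugfs read handler for "stack_max_size": report the deepest stack seen */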
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

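/*
 * debugfs write handler for "stack_max_size": parse the user's value
 * and store it under max_stack_lock, typically to reset the recorded
 * maximum to zero.
 */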
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        unsigned long val, flags;
        char buf[64];
        int ret;

        if (count >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        raw_local_irq_save(flags);
        __raw_spin_lock(&max_stack_lock);
        *ptr = val;
        __raw_spin_unlock(&max_stack_lock);
        raw_local_irq_restore(flags);

        return count;
}

static struct file_operations stack_max_size_fops = {
        .open           = tracing_open_generic,
        .read           = stack_max_size_read,
        .write          = stack_max_size_write,
};

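/*
 * seq_file iterator for "stack_trace".  The current position is
 * stashed as an index in m->private; SEQ_START_TOKEN is returned
 * first so that t_show() can emit the header line.
 */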
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        long i;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                i = 0;
        else {
                i = *(long *)v;
                i++;
        }

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return NULL;

        m->private = (void *)i;

        return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = SEQ_START_TOKEN;
        loff_t l = 0;

        local_irq_disable();
        __raw_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        __raw_spin_unlock(&max_stack_lock);
        local_irq_enable();
}

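/*
 * Resolve a recorded address to a symbol name when kallsyms is
 * available; otherwise fall back to printing the raw pointer.
 */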
static int trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, addr);

        return seq_printf(m, "%s\n", str);
#else
        return seq_printf(m, "%p\n", (void *)addr);
#endif
}

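/*
 * Print the header for SEQ_START_TOKEN, otherwise one entry: its
 * depth from the top of the stack, the size of its frame (the
 * difference from the next entry's depth), and its location.
 */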
static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth   Size      Location"
                           "    (%d entries)\n"
                           "        -----   ----      --------\n",
                           max_stack_trace.nr_entries);
                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static struct seq_operations stack_trace_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static struct file_operations stack_trace_fops = {
        .open           = stack_trace_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,  /* free the state allocated by seq_open() */
};

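/*
 * Create the debugfs control files and hook stack_trace_call() into
 * the function tracer.
 */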
static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
                                    &max_stack_size, &stack_max_size_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_max_size' entry\n");

        entry = debugfs_create_file("stack_trace", 0444, d_tracer,
                                    NULL, &stack_trace_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'stack_trace' entry\n");

        register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);