/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>

#include "trace.h"
#define STACK_TRACE_ENTRIES 500
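
/*
 * stack_dump_trace[] records the call chain of the deepest stack seen
 * so far, one return address per entry, terminated by ULONG_MAX (the
 * designated initializer below pre-fills every slot with ULONG_MAX).
 * stack_dump_index[] is a parallel array: stack_dump_index[i] is the
 * depth in bytes, measured from the top of the stack, at which entry
 * i's return address was found.
 */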
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
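
/*
 * trace_active is a per-cpu recursion counter: stack_trace_call() below
 * increments it on entry and only calls check_stack() when the previous
 * count was zero, so a trace taken while already tracing on the same
 * cpu is dropped rather than recursing.
 */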

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;
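
	/*
	 * The address of a local variable sits near the current stack
	 * pointer: masking it with THREAD_SIZE-1 gives its offset within
	 * the THREAD_SIZE-aligned stack, and since the stack grows down,
	 * THREAD_SIZE minus that offset is the number of bytes in use.
	 * Worked example (assuming THREAD_SIZE is 8K, 0x2000): an address
	 * ending in 0x1f00 yields 0x2000 - 0x1f00 = 0x100 = 256 bytes.
	 */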
	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;
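
	/*
	 * Take a fresh snapshot of the call chain.  The skip count below
	 * drops the innermost entries so the trace starts at the traced
	 * function rather than inside the stack tracer's own frames.
	 */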
	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries.  An entry may for some reason be
	 * missing from the stack, so we may have to search for the rest
	 * more than once.  If every entry is present, this outer loop
	 * runs only once.  This code only runs on a new max, so it is
	 * far from a fast path.
	 */
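	/*
	 * Illustrative walk (hypothetical values): if the saved trace is
	 * {func_a, func_b, func_c} and scanning the stack words upward
	 * finds func_a 256 bytes from the top, func_b at 192 and func_c
	 * at 96, then stack_dump_index becomes {256, 192, 96} and each
	 * per-frame size is the difference between adjacent entries.
	 */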
	while (i < max_stack_trace.nr_entries) {
		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				/* Start the search from here */
				start = p + 1;
			}
		}
		i++;
	}

 out:
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = need_resched();
	preempt_disable_notrace();
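
	/*
	 * need_resched() is sampled before preemption is disabled so that,
	 * on the way out, the matching preempt_enable variant can be
	 * chosen: if a reschedule was already pending, the no_resched
	 * flavor avoids calling into schedule() from inside the tracer.
	 */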
	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified by its own cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
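
/*
 * Once registered (see stack_trace_init() below), ftrace arranges for
 * stack_trace_call() to run from the function-entry hook of every
 * traced kernel function, which is what lets the tracer sample stack
 * usage everywhere.
 */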

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);

	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}
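
/*
 * Illustrative usage from user space, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo 0 > /sys/kernel/debug/tracing/stack_max_size	(reset max)
 *	cat /sys/kernel/debug/tracing/stack_max_size		(read max)
 */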

static struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	long i = (long)m->private;

	(*pos)++;

	if (v == SEQ_START_TOKEN)
		i = 0;
	else
		i++;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return NULL;

	m->private = (void *)i;

	return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = SEQ_START_TOKEN;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}
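
/*
 * Note the locking shape of the seq_file iterator above: t_start()
 * disables interrupts and takes max_stack_lock, and t_stop() releases
 * both, so the snapshot arrays cannot change while one read of the
 * stack_trace file is being formatted.
 */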

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "%s\n", str);
#else
	return seq_printf(m, "%p\n", (void *)addr);
#endif
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth   Size      Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   max_stack_trace.nr_entries);
		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
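
/*
 * Illustrative output (symbols and numbers are hypothetical):
 *
 *         Depth   Size      Location    (3 entries)
 *         -----   ----      --------
 *   0)      256     64   sub_func+0x20/0x58
 *   1)      192     96   some_func+0x10/0xa0
 *   2)       96     96   top_func+0x30/0x90
 */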

static struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
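
/*
 * Typical workflow once the kernel has booted (paths assume debugfs
 * mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/stack_max_size
 *	cat /sys/kernel/debug/tracing/stack_trace
 *
 * The first reports the deepest stack usage seen so far in bytes; the
 * second shows the call chain that produced it, formatted by t_show()
 * above.
 */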