2 * Kernel unwinding support
4 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
6 * Derived partially from the IA64 implementation. The PA-RISC
7 * Runtime Architecture Document is also a useful reference to
8 * understand what is happening here
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/kallsyms.h>
17 #include <asm/uaccess.h>
18 #include <asm/assembly.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/ptrace.h>
22 #include <asm/unwind.h>
/* Unwinder trace output; routed straight to printk here.  The usual
 * kernel pattern makes this a no-op unless a DEBUG define is set --
 * only this arm of the definition is visible in this excerpt. */
26 #define dbg(x...) printk(x)
/* First address the unwinder treats as kernel text: one page below
 * the start of the kernel binary text section. */
31 #define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
/* Linker-provided bounds of the kernel's own unwind descriptor array,
 * collected into the __unwind section at link time. */
33 extern struct unwind_table_entry __start___unwind[];
34 extern struct unwind_table_entry __stop___unwind[];
/* Serialises additions/removals on the unwind_tables list below;
 * initialised in unwind_init(). */
36 static spinlock_t unwind_lock;
38 * the kernel unwind block is not dynamically allocated so that
39 * we can call unwind_init as early in the bootup process as
40 * possible (before the slab allocator is initialized)
42 static struct unwind_table kernel_unwind_table __read_mostly;
/* Dynamically registered tables (e.g. for modules) added via
 * unwind_table_add(), searched after the static kernel table. */
43 static LIST_HEAD(unwind_tables);
/*
 * Binary-search one unwind table (entries sorted by region_start) for
 * the descriptor whose [region_start, region_end] covers @addr.
 * Returns the matching entry, or NULL when no region contains @addr.
 *
 * NOTE(review): this excerpt is missing lines -- the opening brace,
 * the lo initialisation, the search-loop header, the lo/hi updates on
 * each comparison branch and the final return are not visible.
 */
45 static inline const struct unwind_table_entry *
46 find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
48 const struct unwind_table_entry *e = NULL;
49 unsigned long lo, hi, mid;
/* hi starts at the index of the last entry in the table */
52 hi = table->length - 1;
/* midpoint written as lo + (hi-lo)/2 to avoid overflow of (lo+hi) */
55 mid = (hi - lo) / 2 + lo;
56 e = &table->table[mid];
/* addr below this entry's region: continue in the lower half */
57 if (addr < e->region_start)
/* addr above this entry's region: continue in the upper half */
59 else if (addr > e->region_end)
/*
 * Find the unwind descriptor covering @addr.  The static kernel table
 * is checked first; otherwise every dynamically registered table on
 * unwind_tables is range-checked and searched in turn.
 *
 * NOTE(review): the loop body's closing braces, any early-exit once a
 * match is found, and the trailing "return e;" are not visible in this
 * excerpt; nor is any locking around the list walk -- confirm against
 * the full source.
 */
68 static const struct unwind_table_entry *
69 find_unwind_entry(unsigned long addr)
71 struct unwind_table *table;
72 const struct unwind_table_entry *e = NULL;
/* Fast path: address inside the core kernel image */
74 if (addr >= kernel_unwind_table.start &&
75 addr <= kernel_unwind_table.end)
76 e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
/* Otherwise try each registered (module) table whose range matches */
78 list_for_each_entry(table, &unwind_tables, list) {
79 if (addr >= table->start &&
81 e = find_unwind_entry_in_table(table, addr);
/*
 * Initialise @table from the raw descriptor array [@table_start,
 * @table_end).  On entry the descriptors hold region offsets relative
 * to @base_addr; this routine records the table bounds and then
 * rebases every entry to an absolute address, warning when adjacent
 * entries overlap (i.e. the array is not properly sorted).
 *
 * NOTE(review): the function's return-type line, opening brace, the
 * gp assignment, the out-of-order "if" header at the top of the loop
 * and the closing braces are not visible in this excerpt.
 */
90 unwind_table_init(struct unwind_table *table, const char *name,
91 unsigned long base_addr, unsigned long gp,
92 void *table_start, void *table_end)
94 struct unwind_table_entry *start = table_start;
/* 'end' points at the LAST entry, not one-past-the-end */
95 struct unwind_table_entry *end =
96 (struct unwind_table_entry *)table_end - 1;
99 table->base_addr = base_addr;
/* Covered text range, computed before the entries are rebased */
101 table->start = base_addr + start->region_start;
102 table->end = base_addr + end->region_end;
103 table->table = (struct unwind_table_entry *)table_start;
104 table->length = end - start + 1;
105 INIT_LIST_HEAD(&table->list);
/* Rebase each entry; 'start <= end' because 'end' is inclusive */
107 for (; start <= end; start++) {
/* Overlap with the next entry means the table is mis-sorted */
109 start->region_end > (start+1)->region_start) {
110 printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
113 start->region_start += base_addr;
114 start->region_end += base_addr;
/*
 * Insertion-sort the descriptor array [@start, @finish) in ascending
 * region_start order, so find_unwind_entry_in_table() can binary
 * search it.  Insertion sort is fine here: module tables are small
 * and usually nearly sorted already.
 *
 * NOTE(review): the return-type line, the element-shifting body of the
 * inner do/while, and the final store of 'el' are not visible in this
 * excerpt.
 */
119 unwind_table_sort(struct unwind_table_entry *start,
120 struct unwind_table_entry *finish)
/* 'el' holds the entry being inserted; p scans forward, q backward */
122 struct unwind_table_entry el, *p, *q;
124 for (p = start + 1; p < finish; ++p) {
/* Only do work when p[0] is out of order w.r.t. its predecessor */
125 if (p[0].region_start < p[-1].region_start) {
/* Walk q back over every entry greater than the one in hand */
131 } while (q > start &&
132 el.region_start < q[-1].region_start);
/*
 * Register a new unwind table (typically for a just-loaded module).
 * The raw descriptors are sorted in place, a table header is
 * allocated, initialised via unwind_table_init(), and the table is
 * appended to the global list under unwind_lock.
 *
 * Returns the new table; the caller owns it and must release it with
 * unwind_table_remove().
 *
 * NOTE(review): the 'gp' parameter line, the 'flags' declaration, the
 * NULL check after kmalloc() and the trailing "return table;" are not
 * visible in this excerpt -- confirm the allocation failure path in
 * the full source.
 */
138 struct unwind_table *
139 unwind_table_add(const char *name, unsigned long base_addr,
141 void *start, void *end)
143 struct unwind_table *table;
145 struct unwind_table_entry *s = (struct unwind_table_entry *)start;
146 struct unwind_table_entry *e = (struct unwind_table_entry *)end;
/* Sort before publishing so lookups can binary search */
148 unwind_table_sort(s, e);
150 table = kmalloc(sizeof(struct unwind_table), GFP_USER);
153 unwind_table_init(table, name, base_addr, gp, start, end);
/* Publish under the lock so concurrent lookups see a consistent list */
154 spin_lock_irqsave(&unwind_lock, flags);
155 list_add_tail(&table->list, &unwind_tables);
156 spin_unlock_irqrestore(&unwind_lock, flags);
/*
 * Unregister a table previously returned by unwind_table_add(): unlink
 * it from the global list under unwind_lock.
 *
 * NOTE(review): the opening brace, 'flags' declaration, the kfree() of
 * the table and the closing brace are not visible in this excerpt --
 * presumably the table is freed after unlinking; verify in the full
 * source.
 */
161 void unwind_table_remove(struct unwind_table *table)
165 spin_lock_irqsave(&unwind_lock, flags);
166 list_del(&table->list);
167 spin_unlock_irqrestore(&unwind_lock, flags);
172 /* Called from setup_arch to import the kernel unwind info */
/*
 * Set up the static kernel unwind table from the linker-collected
 * __unwind section and initialise the lock protecting the dynamic
 * table list.  Registered with module_init() at the bottom of the
 * file so it runs early in boot, before any slab allocation is needed
 * (kernel_unwind_table is statically allocated for this reason).
 *
 * NOTE(review): the opening brace, the start/stop/i declarations, the
 * use of the 'gp' register variable in the unwind_table_init() call,
 * the "#if 0"-style guard that the region-dump loop usually sits
 * under, and the "return 0;" are not visible in this excerpt.
 */
173 static int unwind_init(void)
/* r27 is the PA-RISC global data pointer; captured for the table */
176 register unsigned long gp __asm__ ("r27");
178 start = (long)&__start___unwind[0];
179 stop = (long)&__stop___unwind[0];
181 spin_lock_init(&unwind_lock);
183 printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
185 (stop - start) / sizeof(struct unwind_table_entry));
187 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
189 &__start___unwind[0], &__stop___unwind[0]);
/* Debug dump of the first few unwind regions */
193 for (i = 0; i < 10; i++)
195 printk("region 0x%x-0x%x\n",
196 __start___unwind[i].region_start,
197 __start___unwind[i].region_end);
/*
 * get_func_addr(): extract the real code address from a function
 * pointer.  The two definitions are the arms of a 64-/32-bit #ifdef
 * (not visible in this excerpt): on 64-bit PA-RISC a function pointer
 * refers to a function descriptor and the code address lives at
 * word 2; on 32-bit the pointer is the code address itself.
 */
205 #define get_func_addr(fptr) fptr[2]
207 #define get_func_addr(fptr) fptr[0]
/*
 * Handle frames the generic decoder cannot unwind through.  If @pc is
 * the entry of handle_interruption(), the interrupted context's
 * pt_regs sit on the stack just below this frame, so prev_sp/prev_ip
 * are recovered directly from the saved gr[30] and iaoq[0].
 *
 * Returns nonzero when the frame was handled specially (return
 * statements are not visible in this excerpt).
 *
 * NOTE(review): L103 initialises 'hi' (a pointer) from a cast to
 * plain 'unsigned long' -- the cast should presumably be
 * '(unsigned long *)'; likely an extraction artifact, but verify.
 */
210 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
212 void handle_interruption(int, struct pt_regs *);
213 static unsigned long *hi = (unsigned long)&handle_interruption;
215 if (pc == get_func_addr(hi)) {
/* pt_regs were pushed below the current frame: step over the frame
 * and a PT_SZ_ALGN-sized register save area */
216 struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
217 dbg("Unwinding through handle_interruption()\n");
218 info->prev_sp = regs->gr[30];
219 info->prev_ip = regs->iaoq[0];
/*
 * Core single-step of the unwinder: given info->ip/sp, compute
 * info->prev_ip/prev_sp for the caller's frame.
 *
 * Two strategies:
 *  1. No unwind entry for ip ("forced unwinding"): recognise a few
 *     well-known assembly return points by symbol name (KALLSYMS),
 *     otherwise walk the stack blindly in 64-byte steps until a
 *     plausible kernel-text return address is found.
 *  2. With an unwind entry: scan the function prologue instruction by
 *     instruction, accumulating the stack frame size and locating
 *     where rp (the return pointer) was saved, then recover prev_sp
 *     and prev_ip from that.
 *
 * NOTE(review): many lines are missing from this excerpt -- variable
 * declarations, several loop/branch headers and closing braces, the
 * Millicode handling, and the final assignments; the comments below
 * describe only what the visible lines establish.
 */
227 static void unwind_frame_regs(struct unwind_frame_info *info)
229 const struct unwind_table_entry *e;
233 int looking_for_rp, rpoffset = 0;
235 e = find_unwind_entry(info->ip);
238 extern char _stext[], _etext[];
240 dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
242 #ifdef CONFIG_KALLSYMS
243 /* Handle some frequent special cases.... */
245 char symname[KSYM_NAME_LEN];
248 kallsyms_lookup(info->ip, NULL, NULL, &modname,
251 dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
/* _switch_to_ret: fixed callee-save frame; rp at the usual slot */
253 if (strcmp(symname, "_switch_to_ret") == 0) {
254 info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
255 info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
256 dbg("_switch_to_ret @ %lx - setting "
257 "prev_sp=%lx prev_ip=%lx\n",
258 info->ip, info->prev_sp,
/* Entry points with no caller frame: terminate the unwind */
261 } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
262 strcmp(symname, "syscall_exit") == 0) {
263 info->prev_ip = info->prev_sp = 0;
269 /* Since we are doing the unwinding blind, we don't know if
270 we are adjusting the stack correctly or extracting the rp
271 correctly. The rp is checked to see if it belongs to the
272 kernel text section, if not we assume we don't have a
273 correct stack frame and we continue to unwind the stack.
274 This is not quite correct, and will fail for loadable
/* Blind walk: PA-RISC frames are 64-byte aligned, so probe in
 * 64-byte steps and read the candidate rp slot via get_user() so a
 * bad pointer faults safely instead of oopsing */
280 info->prev_sp = sp - 64;
282 if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
/* Keep stepping until the candidate ip lands inside kernel text */
286 } while (info->prev_ip < (unsigned long)_stext ||
287 info->prev_ip > (unsigned long)_etext);
291 dbg("analyzing func @ %lx with no unwind info, setting "
292 "prev_sp=%lx prev_ip=%lx\n", info->ip,
293 info->prev_sp, info->prev_ip);
/* Unwind entry found: decode the prologue described by it */
295 dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
296 "Save_RP = %d, Millicode = %d size = %u\n",
297 e->region_start, e->region_end, e->Save_SP, e->Save_RP,
298 e->Millicode, e->Total_frame_size);
/* Only hunt for an rp-store if the descriptor says rp was saved */
300 looking_for_rp = e->Save_RP;
/* Scan from the function start until the declared frame size
 * (Total_frame_size is in 8-byte doublewords, hence << 3) has been
 * accounted for and rp has been located */
302 for (npc = e->region_start;
303 (frame_size < (e->Total_frame_size << 3) ||
308 insn = *(unsigned int *)npc;
310 if ((insn & 0xffffc000) == 0x37de0000 ||
311 (insn & 0xffe00000) == 0x6fc00000) {
312 /* ldo X(sp), sp, or stwm X,D(sp) */
/* Decode the 14-bit low-sign-extended immediate: bit 0 is the
 * sign.  NOTE(review): '-1 << 13' left-shifts a negative value
 * (formally UB in C); the kernel relies on the usual two's-
 * complement result here */
313 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
314 ((insn & 0x3fff) >> 1);
315 dbg("analyzing func @ %lx, insn=%08x @ "
316 "%lx, frame_size = %ld\n", info->ip,
317 insn, npc, frame_size);
/* std,ma X,D(sp): 10-bit immediate scaled by 8 */
318 } else if ((insn & 0xffe00008) == 0x73c00008) {
320 frame_size += (insn & 0x1 ? -1 << 13 : 0) |
321 (((insn >> 4) & 0x3ff) << 3);
322 dbg("analyzing func @ %lx, insn=%08x @ "
323 "%lx, frame_size = %ld\n", info->ip,
324 insn, npc, frame_size);
/* Exact match: stw rp,-20(sp) -- 32-bit rp save */
325 } else if (insn == 0x6bc23fd9) {
329 dbg("analyzing func @ %lx, insn=stw rp,"
330 "-20(sp) @ %lx\n", info->ip, npc);
/* Exact match: std rp,-16(sr0,sp) -- 64-bit rp save */
331 } else if (insn == 0x0fc212c1) {
332 /* std rp,-16(sr0,sp) */
335 dbg("analyzing func @ %lx, insn=std rp,"
336 "-16(sp) @ %lx\n", info->ip, npc);
/* Not a special frame: previous sp is this sp minus the frame;
 * rp comes from r31 (millicode/stub case) or from the saved slot */
340 if (!unwind_special(info, e->region_start, frame_size)) {
341 info->prev_sp = info->sp - frame_size;
343 info->rp = info->r31;
345 info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
346 info->prev_ip = info->rp;
350 dbg("analyzing func @ %lx, setting prev_sp=%lx "
351 "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
/*
 * Seed @info for a fresh unwind of task @t from the register snapshot
 * in @regs: stack pointer from gr[30], instruction address from
 * iaoq[0], return pointer from gr[2] (rp) and r31 for the millicode
 * case in unwind_frame_regs().  @t may be NULL (logged as pid -1).
 */
356 void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
357 struct pt_regs *regs)
359 memset(info, 0, sizeof(struct unwind_frame_info));
361 info->sp = regs->gr[30];
362 info->ip = regs->iaoq[0];
363 info->rp = regs->gr[2];
364 info->r31 = regs->gr[31];
366 dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
367 t ? (int)t->pid : -1, info->sp, info->ip);
/*
 * Start an unwind of a sleeping task from its saved thread registers.
 * A temporary pt_regs copy is built so the blocked task's saved kernel
 * pc (kpc) can be substituted as the starting ip.
 *
 * NOTE(review): the NULL check on the kmalloc, the memcpy of *r into
 * *r2, and the kfree() of r2 after unwind_frame_init() are not
 * visible in this excerpt -- confirm the allocation is both checked
 * and released in the full source.
 */
370 void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
372 struct pt_regs *r = &t->thread.regs;
375 r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
/* Resume point of the blocked task becomes the starting ip */
380 r2->iaoq[0] = r->kpc;
381 unwind_frame_init(info, t, r2);
/* Convenience wrapper: start an unwind of the current task from the
 * given register snapshot. */
385 void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
387 unwind_frame_init(info, current, regs);
/*
 * Advance @next_frame by exactly one stack frame: compute the previous
 * frame with unwind_frame_regs(), then promote prev_sp/prev_ip to
 * sp/ip and clear them for the next step.
 *
 * A zero prev_sp or prev_ip means the unwind has terminated; the
 * return statements (failure early-out and final success return) are
 * not visible in this excerpt.
 */
390 int unwind_once(struct unwind_frame_info *next_frame)
392 unwind_frame_regs(next_frame);
/* prev_{sp,ip} == 0 is the "no more frames" sentinel */
394 if (next_frame->prev_sp == 0 ||
395 next_frame->prev_ip == 0)
/* Step: the previous frame becomes the current one */
398 next_frame->sp = next_frame->prev_sp;
399 next_frame->ip = next_frame->prev_ip;
400 next_frame->prev_sp = 0;
401 next_frame->prev_ip = 0;
403 dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
404 next_frame->t ? (int)next_frame->t->pid : -1,
405 next_frame->sp, next_frame->ip);
/*
 * Unwind repeatedly until either unwind_once() reports termination or
 * the ip has its low two bits set -- on PA-RISC the privilege level is
 * encoded in the bottom bits of the instruction address, so a nonzero
 * (ip & 3) marks the first user-space frame.  Returns the last
 * unwind_once() result (the final return is not visible here).
 */
410 int unwind_to_user(struct unwind_frame_info *info)
415 ret = unwind_once(info);
416 } while (!ret && !(info->ip & 3));
/* Import the kernel unwind table early in boot */
421 module_init(unwind_init);