2 * Copyright (C) 1999-2004 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
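 *
 * Illustrative sketch (editorial note, not from the original text): a cached
 * unwind script is conceptually just a short list of struct unw_insn
 * "assignments" that run_script() below interprets to fill in the
 * save-location pointers of a struct unw_frame_info, e.g.:
 *
 *	psp    <- sp + frame_size		(UNW_INSN_ADD)
 *	rp_loc <- psp + rp_spill_offset		(UNW_INSN_ADD_PSP)
 *	pr_loc <- &switch_stack.pr		(lazily initialized in run_script())
 *
 * The opcodes name real UNW_INSN_* cases handled in run_script(); the
 * particular registers and offsets are made up for illustration.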
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
36 #include <asm/unwind.h>
38 #include <asm/delay.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
43 #include <asm/sections.h>
44 #include <asm/system.h>
45 #include <asm/uaccess.h>
50 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
51 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
53 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
54 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
56 #define UNW_STATS 0 /* WARNING: this disables interrupts for long time-spans!! */
59 static unsigned int unw_debug_level = UNW_DEBUG;
60 # define UNW_DEBUG_ON(n) (unw_debug_level >= (n))
61 /* Do not hard-code a printk level here; not all debug lines end in a newline. */
62 # define UNW_DPRINT(n, ...) do { if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__); } while (0)
65 #else /* !UNW_DEBUG */
66 # define UNW_DEBUG_ON(n) 0
67 # define UNW_DPRINT(n, ...)
68 #endif /* UNW_DEBUG */
76 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
77 #define free_reg_state(usr) kfree(usr)
78 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
79 #define free_labeled_state(usr) kfree(usr)
81 typedef unsigned long unw_word;
82 typedef unsigned char unw_hash_index_t;
85 spinlock_t lock; /* spinlock for unwind data */
87 /* list of unwind tables (one per load-module) */
88 struct unw_table *tables;
90 unsigned long r0; /* constant 0 for r0 */
92 /* table of registers that prologues can save (and order in which they're saved): */
93 const unsigned char save_order[8];
95 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
96 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
98 unsigned short lru_head; /* index of least-recently used script */
99 unsigned short lru_tail; /* index of most-recently used script */
101 /* index into unw_frame_info for preserved register i */
102 unsigned short preg_index[UNW_NUM_REGS];
104 short pt_regs_offsets[32];
106 /* unwind table for the kernel: */
107 struct unw_table kernel_table;
109 /* unwind table describing the gate page (kernel code that is mapped into user space): */
110 size_t gate_table_size;
111 unsigned long *gate_table;
113 /* hash table that maps instruction pointer to script index: */
114 unsigned short hash[UNW_HASH_SIZE];
117 struct unw_script cache[UNW_CACHE_SIZE];
120 const char *preg_name[UNW_NUM_REGS];
128 int collision_chain_traversals;
131 unsigned long build_time;
132 unsigned long run_time;
133 unsigned long parse_time;
140 unsigned long init_time;
141 unsigned long unwind_time;
148 .tables = &unw.kernel_table,
149 .lock = __SPIN_LOCK_UNLOCKED(unw.lock),
151 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
152 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
155 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
156 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
157 offsetof(struct unw_frame_info, bsp_loc)/8,
158 offsetof(struct unw_frame_info, bspstore_loc)/8,
159 offsetof(struct unw_frame_info, pfs_loc)/8,
160 offsetof(struct unw_frame_info, rnat_loc)/8,
161 offsetof(struct unw_frame_info, psp)/8,
162 offsetof(struct unw_frame_info, rp_loc)/8,
163 offsetof(struct unw_frame_info, r4)/8,
164 offsetof(struct unw_frame_info, r5)/8,
165 offsetof(struct unw_frame_info, r6)/8,
166 offsetof(struct unw_frame_info, r7)/8,
167 offsetof(struct unw_frame_info, unat_loc)/8,
168 offsetof(struct unw_frame_info, pr_loc)/8,
169 offsetof(struct unw_frame_info, lc_loc)/8,
170 offsetof(struct unw_frame_info, fpsr_loc)/8,
171 offsetof(struct unw_frame_info, b1_loc)/8,
172 offsetof(struct unw_frame_info, b2_loc)/8,
173 offsetof(struct unw_frame_info, b3_loc)/8,
174 offsetof(struct unw_frame_info, b4_loc)/8,
175 offsetof(struct unw_frame_info, b5_loc)/8,
176 offsetof(struct unw_frame_info, f2_loc)/8,
177 offsetof(struct unw_frame_info, f3_loc)/8,
178 offsetof(struct unw_frame_info, f4_loc)/8,
179 offsetof(struct unw_frame_info, f5_loc)/8,
180 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
181 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
182 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
183 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
184 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
199 offsetof(struct pt_regs, r1),
200 offsetof(struct pt_regs, r2),
201 offsetof(struct pt_regs, r3),
202 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
203 offsetof(struct pt_regs, r8),
204 offsetof(struct pt_regs, r9),
205 offsetof(struct pt_regs, r10),
206 offsetof(struct pt_regs, r11),
207 offsetof(struct pt_regs, r12),
208 offsetof(struct pt_regs, r13),
209 offsetof(struct pt_regs, r14),
210 offsetof(struct pt_regs, r15),
211 offsetof(struct pt_regs, r16),
212 offsetof(struct pt_regs, r17),
213 offsetof(struct pt_regs, r18),
214 offsetof(struct pt_regs, r19),
215 offsetof(struct pt_regs, r20),
216 offsetof(struct pt_regs, r21),
217 offsetof(struct pt_regs, r22),
218 offsetof(struct pt_regs, r23),
219 offsetof(struct pt_regs, r24),
220 offsetof(struct pt_regs, r25),
221 offsetof(struct pt_regs, r26),
222 offsetof(struct pt_regs, r27),
223 offsetof(struct pt_regs, r28),
224 offsetof(struct pt_regs, r29),
225 offsetof(struct pt_regs, r30),
226 offsetof(struct pt_regs, r31),
228 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
231 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
232 "r4", "r5", "r6", "r7",
233 "ar.unat", "pr", "ar.lc", "ar.fpsr",
234 "b1", "b2", "b3", "b4", "b5",
235 "f2", "f3", "f4", "f5",
236 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
237 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
243 read_only (void *addr)
245 return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
249 * Returns offset of rREG in struct pt_regs.
251 static inline unsigned long
252 pt_regs_off (unsigned long reg)
256 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
257 off = unw.pt_regs_offsets[reg];
260 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
263 return (unsigned long) off;
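
/*
 * Editorial note: the preserved registers r4-r7 deliberately map to -1 in
 * unw.pt_regs_offsets[] above, so asking pt_regs_off() for one of them trips
 * the "bad scratch reg" diagnostic; those registers are reached via
 * unw.preg_index[] and the switch_stack instead (see unw_access_gr() below).
 */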
266 static inline struct pt_regs *
267 get_scratch_regs (struct unw_frame_info *info)
270 /* This should not happen with valid unwind info. */
271 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
272 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
273 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
275 info->pt = info->sp - 16;
277 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
278 return (struct pt_regs *) info->pt;
281 /* Unwind accessors. */
284 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
286 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
287 struct unw_ireg *ireg;
290 if ((unsigned) regnum - 1 >= 127) {
291 if (regnum == 0 && !write) {
292 *val = 0; /* read r0 always returns 0 */
296 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
297 __FUNCTION__, regnum);
302 if (regnum >= 4 && regnum <= 7) {
303 /* access a preserved register */
304 ireg = &info->r4 + (regnum - 4);
307 nat_addr = addr + ireg->nat.off;
308 switch (ireg->nat.type) {
310 /* simulate getf.sig/setf.sig */
313 /* write NaTVal and be done with it */
320 if (addr[0] == 0 && addr[1] == 0x1fffe) {
321 /* return NaT and be done with it */
330 nat_addr = &dummy_nat;
334 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
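		/*
		 * Editorial note: for a general register spilled to memory,
		 * st8.spill selects its ar.unat bit using bits 8:3 of the
		 * spill address (one bit per 8-byte slot of a 512-byte
		 * aligned window), which is what ((long) addr & 0x1f8)/8
		 * computes here.
		 */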
338 nat_addr = ia64_rse_rnat_addr(addr);
339 if ((unsigned long) addr < info->regstk.limit
340 || (unsigned long) addr >= info->regstk.top)
342 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
344 __FUNCTION__, (void *) addr,
349 if ((unsigned long) nat_addr >= info->regstk.top)
350 nat_addr = &info->sw->ar_rnat;
351 nat_mask = (1UL << ia64_rse_slot_num(addr));
355 addr = &info->sw->r4 + (regnum - 4);
356 nat_addr = &info->sw->ar_unat;
357 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
360 /* access a scratch register */
361 pt = get_scratch_regs(info);
362 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
363 if (info->pri_unat_loc)
364 nat_addr = info->pri_unat_loc;
366 nat_addr = &info->sw->caller_unat;
367 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
370 /* access a stacked register */
371 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
372 nat_addr = ia64_rse_rnat_addr(addr);
373 if ((unsigned long) addr < info->regstk.limit
374 || (unsigned long) addr >= info->regstk.top)
376 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
377 "of rbs\n", __FUNCTION__);
380 if ((unsigned long) nat_addr >= info->regstk.top)
381 nat_addr = &info->sw->ar_rnat;
382 nat_mask = (1UL << ia64_rse_slot_num(addr));
386 if (read_only(addr)) {
387 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
392 *nat_addr |= nat_mask;
394 *nat_addr &= ~nat_mask;
397 if ((*nat_addr & nat_mask) == 0) {
401 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
407 EXPORT_SYMBOL(unw_access_gr);
410 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
417 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
418 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
419 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
422 case 1: case 2: case 3: case 4: case 5:
423 addr = *(&info->b1_loc + (regnum - 1));
425 addr = &info->sw->b1 + (regnum - 1);
429 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
430 __FUNCTION__, regnum);
434 if (read_only(addr)) {
435 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
443 EXPORT_SYMBOL(unw_access_br);
446 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
448 struct ia64_fpreg *addr = NULL;
451 if ((unsigned) (regnum - 2) >= 126) {
452 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
453 __FUNCTION__, regnum);
458 addr = *(&info->f2_loc + (regnum - 2));
460 addr = &info->sw->f2 + (regnum - 2);
461 } else if (regnum <= 15) {
463 pt = get_scratch_regs(info);
464 addr = &pt->f6 + (regnum - 6);
467 addr = &info->sw->f12 + (regnum - 12);
468 } else if (regnum <= 31) {
469 addr = info->fr_loc[regnum - 16];
471 addr = &info->sw->f16 + (regnum - 16);
473 struct task_struct *t = info->task;
479 addr = t->thread.fph + (regnum - 32);
483 if (read_only(addr)) {
484 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
492 EXPORT_SYMBOL(unw_access_fr);
495 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
502 addr = info->bsp_loc;
504 addr = &info->sw->ar_bspstore;
507 case UNW_AR_BSPSTORE:
508 addr = info->bspstore_loc;
510 addr = &info->sw->ar_bspstore;
514 addr = info->pfs_loc;
516 addr = &info->sw->ar_pfs;
520 addr = info->rnat_loc;
522 addr = &info->sw->ar_rnat;
526 addr = info->unat_loc;
528 addr = &info->sw->caller_unat;
534 addr = &info->sw->ar_lc;
542 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
544 *val = (*info->cfm_loc >> 52) & 0x3f;
548 addr = info->fpsr_loc;
550 addr = &info->sw->ar_fpsr;
554 pt = get_scratch_regs(info);
559 pt = get_scratch_regs(info);
564 pt = get_scratch_regs(info);
569 pt = get_scratch_regs(info);
574 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
575 __FUNCTION__, regnum);
580 if (read_only(addr)) {
581 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
589 EXPORT_SYMBOL(unw_access_ar);
592 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
598 addr = &info->sw->pr;
601 if (read_only(addr)) {
602 UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
610 EXPORT_SYMBOL(unw_access_pr);
613 /* Routines to manipulate the state stack. */
616 push (struct unw_state_record *sr)
618 struct unw_reg_state *rs;
620 rs = alloc_reg_state();
622 printk(KERN_ERR "unwind: cannot stack reg state!\n");
625 memcpy(rs, &sr->curr, sizeof(*rs));
630 pop (struct unw_state_record *sr)
632 struct unw_reg_state *rs = sr->curr.next;
635 printk(KERN_ERR "unwind: stack underflow!\n");
638 memcpy(&sr->curr, rs, sizeof(*rs));
642 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
643 static struct unw_reg_state *
644 dup_state_stack (struct unw_reg_state *rs)
646 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
649 copy = alloc_reg_state();
651 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
654 memcpy(copy, rs, sizeof(*copy));
665 /* Free all stacked register states (but not RS itself). */
667 free_state_stack (struct unw_reg_state *rs)
669 struct unw_reg_state *p, *next;
671 for (p = rs->next; p != NULL; p = next) {
678 /* Unwind decoder routines */
680 static enum unw_register_index __attribute_const__
681 decode_abreg (unsigned char abreg, int memory)
684 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
685 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
686 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
687 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
688 case 0x60: return UNW_REG_PR;
689 case 0x61: return UNW_REG_PSP;
690 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
691 case 0x63: return UNW_REG_RP;
692 case 0x64: return UNW_REG_BSP;
693 case 0x65: return UNW_REG_BSPSTORE;
694 case 0x66: return UNW_REG_RNAT;
695 case 0x67: return UNW_REG_UNAT;
696 case 0x68: return UNW_REG_FPSR;
697 case 0x69: return UNW_REG_PFS;
698 case 0x6a: return UNW_REG_LC;
702 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
707 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
711 if (reg->when == UNW_WHEN_NEVER)
716 alloc_spill_area (unsigned long *offp, unsigned long regsize,
717 struct unw_reg_info *lo, struct unw_reg_info *hi)
719 struct unw_reg_info *reg;
721 for (reg = hi; reg >= lo; --reg) {
722 if (reg->where == UNW_WHERE_SPILL_HOME) {
723 reg->where = UNW_WHERE_PSPREL;
731 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
733 struct unw_reg_info *reg;
735 for (reg = *regp; reg <= lim; ++reg) {
736 if (reg->where == UNW_WHERE_SPILL_HOME) {
742 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
746 finish_prologue (struct unw_state_record *sr)
748 struct unw_reg_info *reg;
753 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
754 * for Using Unwind Descriptors", rule 3):
756 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
757 reg = sr->curr.reg + unw.save_order[i];
758 if (reg->where == UNW_WHERE_GR_SAVE) {
759 reg->where = UNW_WHERE_GR;
760 reg->val = sr->gr_save_loc++;
765 * Next, compute when the fp, general, and branch registers get
766 * saved. This must come before alloc_spill_area() because
767 * we need to know which registers are spilled to their home locations.
771 unsigned char kind, mask = 0, *cp = sr->imask;
773 static const unsigned char limit[3] = {
774 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
776 struct unw_reg_info *(regs[3]);
778 regs[0] = sr->curr.reg + UNW_REG_F2;
779 regs[1] = sr->curr.reg + UNW_REG_R4;
780 regs[2] = sr->curr.reg + UNW_REG_B1;
782 for (t = 0; t < sr->region_len; ++t) {
785 kind = (mask >> 2*(3-(t & 3))) & 3;
787 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
788 sr->region_start + t);
792 * Next, lay out the memory stack spill area:
794 if (sr->any_spills) {
795 off = sr->spill_offset;
796 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
797 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
798 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
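	/*
	 * Editorial note: the spill area is carved out in three passes with
	 * the slot sizes visible above: 16 bytes per floating-point register
	 * (f2-f31) first, then 8 bytes each for b1-b5 and r4-r7, presumably
	 * so the floating-point slots keep 16-byte alignment.
	 */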
803 * Region header descriptors.
807 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
808 struct unw_state_record *sr)
812 if (!(sr->in_body || sr->first_region))
814 sr->first_region = 0;
816 /* check if we're done: */
817 if (sr->when_target < sr->region_start + sr->region_len) {
822 region_start = sr->region_start + sr->region_len;
824 for (i = 0; i < sr->epilogue_count; ++i)
826 sr->epilogue_count = 0;
827 sr->epilogue_start = UNW_WHEN_NEVER;
829 sr->region_start = region_start;
830 sr->region_len = rlen;
836 for (i = 0; i < 4; ++i) {
838 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
839 sr->region_start + sr->region_len - 1, grsave++);
842 sr->gr_save_loc = grsave;
845 sr->spill_offset = 0x10; /* default to psp+16 */
850 * Prologue descriptors.
854 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
856 if (abi == 3 && context == 'i') {
857 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
858 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
861 UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
862 __FUNCTION__, abi, context);
866 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
870 for (i = 0; i < 5; ++i) {
872 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
873 sr->region_start + sr->region_len - 1, gr++);
879 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
883 for (i = 0; i < 5; ++i) {
885 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
886 sr->region_start + sr->region_len - 1, 0);
894 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
898 for (i = 0; i < 4; ++i) {
899 if ((grmask & 1) != 0) {
900 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
901 sr->region_start + sr->region_len - 1, 0);
906 for (i = 0; i < 20; ++i) {
907 if ((frmask & 1) != 0) {
908 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
909 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
910 sr->region_start + sr->region_len - 1, 0);
918 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
922 for (i = 0; i < 4; ++i) {
923 if ((frmask & 1) != 0) {
924 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
925 sr->region_start + sr->region_len - 1, 0);
933 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
937 for (i = 0; i < 4; ++i) {
938 if ((grmask & 1) != 0)
939 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
940 sr->region_start + sr->region_len - 1, gr++);
946 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
950 for (i = 0; i < 4; ++i) {
951 if ((grmask & 1) != 0) {
952 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
953 sr->region_start + sr->region_len - 1, 0);
961 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
963 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
964 sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
968 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
970 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
974 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
976 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
980 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
982 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
987 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
989 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
994 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
996 sr->return_link_reg = dst;
1000 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1002 struct unw_reg_info *reg = sr->curr.reg + regnum;
1004 if (reg->where == UNW_WHERE_NONE)
1005 reg->where = UNW_WHERE_GR_SAVE;
1006 reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1010 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1012 sr->spill_offset = 0x10 - 4*pspoff;
1015 static inline unsigned char *
1016 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1019 return imaskp + (2*sr->region_len + 7)/8;
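
/*
 * Editorial note: the imask encodes two bits per instruction slot of the
 * region (see the "kind" extraction in finish_prologue() above), so it
 * occupies (2*region_len + 7)/8 bytes; the value returned above simply skips
 * past it to the next descriptor.
 */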
1026 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1028 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1029 sr->epilogue_count = ecount + 1;
1033 desc_copy_state (unw_word label, struct unw_state_record *sr)
1035 struct unw_labeled_state *ls;
1037 for (ls = sr->labeled_states; ls; ls = ls->next) {
1038 if (ls->label == label) {
1039 free_state_stack(&sr->curr);
1040 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1041 sr->curr.next = dup_state_stack(ls->saved_state.next);
1045 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1049 desc_label_state (unw_word label, struct unw_state_record *sr)
1051 struct unw_labeled_state *ls;
1053 ls = alloc_labeled_state();
1055 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1059 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1060 ls->saved_state.next = dup_state_stack(sr->curr.next);
1062 /* insert into list of labeled states: */
1063 ls->next = sr->labeled_states;
1064 sr->labeled_states = ls;
1068 * General descriptors.
1072 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1074 if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1077 if ((sr->pr_val & (1UL << qp)) == 0)
1079 sr->pr_mask |= (1UL << qp);
1085 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1087 struct unw_reg_info *r;
1089 if (!desc_is_active(qp, t, sr))
1092 r = sr->curr.reg + decode_abreg(abreg, 0);
1093 r->where = UNW_WHERE_NONE;
1094 r->when = UNW_WHEN_NEVER;
1099 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1100 unsigned char ytreg, struct unw_state_record *sr)
1102 enum unw_where where = UNW_WHERE_GR;
1103 struct unw_reg_info *r;
1105 if (!desc_is_active(qp, t, sr))
1109 where = UNW_WHERE_BR;
1110 else if (ytreg & 0x80)
1111 where = UNW_WHERE_FR;
1113 r = sr->curr.reg + decode_abreg(abreg, 0);
1115 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1116 r->val = (ytreg & 0x7f);
1120 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1121 struct unw_state_record *sr)
1123 struct unw_reg_info *r;
1125 if (!desc_is_active(qp, t, sr))
1128 r = sr->curr.reg + decode_abreg(abreg, 1);
1129 r->where = UNW_WHERE_PSPREL;
1130 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1131 r->val = 0x10 - 4*pspoff;
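	/*
	 * Editorial note: psp-relative spill offsets arrive from the
	 * descriptors as a count of 4-byte words below psp+16, hence the
	 * 0x10 - 4*pspoff conversion to a (typically negative) byte offset;
	 * run_script() later turns it into an address via UNW_INSN_ADD_PSP.
	 */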
1135 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1136 struct unw_state_record *sr)
1138 struct unw_reg_info *r;
1140 if (!desc_is_active(qp, t, sr))
1143 r = sr->curr.reg + decode_abreg(abreg, 1);
1144 r->where = UNW_WHERE_SPREL;
1145 r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1149 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1155 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1156 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1158 * prologue descriptors:
1160 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1161 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1162 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1163 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1164 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1165 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1166 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1167 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1168 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1169 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1170 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1171 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1172 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1173 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1174 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1175 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1176 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1178 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1179 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1180 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1184 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1185 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1186 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1188 * general unwind descriptors:
1190 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1191 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1192 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1193 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1194 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1195 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1196 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1197 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1199 #include "unwind_decoder.c"
1202 /* Unwind scripts. */
1204 static inline unw_hash_index_t
1205 hash (unsigned long ip)
1207 # define hashmagic 0x9e3779b97f4a7c16UL /* based on ((sqrt(5)-1)/2)*2^64, the golden ratio */
1209 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
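	/*
	 * Editorial note: ip >> 4 is the 16-byte bundle address (the low four
	 * bits of an ia64 ip only select the slot within a bundle);
	 * multiplying by the golden-ratio constant and keeping the top
	 * UNW_LOG_HASH_SIZE bits is standard Fibonacci hashing.
	 */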
1214 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1216 read_lock(&script->lock);
1217 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1218 /* keep the read lock... */
1220 read_unlock(&script->lock);
1224 static inline struct unw_script *
1225 script_lookup (struct unw_frame_info *info)
1227 struct unw_script *script = unw.cache + info->hint;
1228 unsigned short index;
1229 unsigned long ip, pr;
1231 if (UNW_DEBUG_ON(0))
1232 return NULL; /* Always regenerate scripts in debug mode */
1234 STAT(++unw.stat.cache.lookups);
1239 if (cache_match(script, ip, pr)) {
1240 STAT(++unw.stat.cache.hinted_hits);
1244 index = unw.hash[hash(ip)];
1245 if (index >= UNW_CACHE_SIZE)
1248 script = unw.cache + index;
1250 if (cache_match(script, ip, pr)) {
1251 /* update hint; no locking required as single-word writes are atomic */
1252 STAT(++unw.stat.cache.normal_hits);
1253 unw.cache[info->prev_script].hint = script - unw.cache;
1256 if (script->coll_chain >= UNW_HASH_SIZE)
1258 script = unw.cache + script->coll_chain;
1259 STAT(++unw.stat.cache.collision_chain_traversals);
1264 * On returning, a write lock for the SCRIPT is still being held.
1266 static inline struct unw_script *
1267 script_new (unsigned long ip)
1269 struct unw_script *script, *prev, *tmp;
1270 unw_hash_index_t index;
1271 unsigned short head;
1273 STAT(++unw.stat.script.news);
1276 * Can't (easily) use cmpxchg() here because of ABA problem
1277 * that is intrinsic in cmpxchg()...
1279 head = unw.lru_head;
1280 script = unw.cache + head;
1281 unw.lru_head = script->lru_chain;
1284 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1285 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1286 * alternative would be to disable interrupts whenever we hold a read-lock, but that seems silly.
1289 if (!write_trylock(&script->lock))
1292 /* re-insert script at the tail of the LRU chain: */
1293 unw.cache[unw.lru_tail].lru_chain = head;
1294 unw.lru_tail = head;
1296 /* remove the old script from the hash table (if it's there): */
1298 index = hash(script->ip);
1299 tmp = unw.cache + unw.hash[index];
1302 if (tmp == script) {
1304 prev->coll_chain = tmp->coll_chain;
1306 unw.hash[index] = tmp->coll_chain;
1310 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1311 /* old script wasn't in the hash-table */
1313 tmp = unw.cache + tmp->coll_chain;
1317 /* enter new script in the hash table */
1319 script->coll_chain = unw.hash[index];
1320 unw.hash[index] = script - unw.cache;
1322 script->ip = ip; /* set new IP while we're holding the locks */
1324 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1333 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1335 script->pr_mask = sr->pr_mask;
1336 script->pr_val = sr->pr_val;
1338 * We could down-grade our write-lock on script->lock here but
1339 * the rwlock API doesn't offer atomic lock downgrading, so
1340 * we'll just keep the write-lock and release it later when
1341 * we're done using the script.
1346 script_emit (struct unw_script *script, struct unw_insn insn)
1348 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1349 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1350 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1353 script->insn[script->count++] = insn;
1357 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1359 struct unw_reg_info *r = sr->curr.reg + i;
1360 enum unw_insn_opcode opc;
1361 struct unw_insn insn;
1362 unsigned long val = 0;
1367 /* register got spilled to a stacked register */
1368 opc = UNW_INSN_SETNAT_TYPE;
1369 val = UNW_NAT_REGSTK;
1371 /* register got spilled to a scratch register */
1372 opc = UNW_INSN_SETNAT_MEMSTK;
1376 opc = UNW_INSN_SETNAT_TYPE;
1381 opc = UNW_INSN_SETNAT_TYPE;
1385 case UNW_WHERE_PSPREL:
1386 case UNW_WHERE_SPREL:
1387 opc = UNW_INSN_SETNAT_MEMSTK;
1391 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392 __FUNCTION__, r->where);
1396 insn.dst = unw.preg_index[i];
1398 script_emit(script, insn);
1402 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1404 struct unw_reg_info *r = sr->curr.reg + i;
1405 enum unw_insn_opcode opc;
1406 unsigned long val, rval;
1407 struct unw_insn insn;
1410 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1413 opc = UNW_INSN_MOVE;
1414 val = rval = r->val;
1415 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1420 opc = UNW_INSN_MOVE_STACKED;
1422 } else if (rval >= 4 && rval <= 7) {
1423 if (need_nat_info) {
1424 opc = UNW_INSN_MOVE2;
1427 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1428 } else if (rval == 0) {
1429 opc = UNW_INSN_MOVE_CONST;
1432 /* register got spilled to a scratch register */
1433 opc = UNW_INSN_MOVE_SCRATCH;
1434 val = pt_regs_off(rval);
1440 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1441 else if (rval >= 16 && rval <= 31)
1442 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1444 opc = UNW_INSN_MOVE_SCRATCH;
1446 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1448 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449 __FUNCTION__, rval);
1454 if (rval >= 1 && rval <= 5)
1455 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1457 opc = UNW_INSN_MOVE_SCRATCH;
1459 val = offsetof(struct pt_regs, b0);
1461 val = offsetof(struct pt_regs, b6);
1463 val = offsetof(struct pt_regs, b7);
1467 case UNW_WHERE_SPREL:
1468 opc = UNW_INSN_ADD_SP;
1471 case UNW_WHERE_PSPREL:
1472 opc = UNW_INSN_ADD_PSP;
1476 UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1477 __FUNCTION__, i, r->where);
1481 insn.dst = unw.preg_index[i];
1483 script_emit(script, insn);
1485 emit_nat_info(sr, i, script);
1487 if (i == UNW_REG_PSP) {
1489 * info->psp must contain the _value_ of the previous
1490 * sp, not its save location. We get this by
1491 * dereferencing the value we just stored in
1494 insn.opc = UNW_INSN_LOAD;
1495 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1496 script_emit(script, insn);
1500 static inline const struct unw_table_entry *
1501 lookup (struct unw_table *table, unsigned long rel_ip)
1503 const struct unw_table_entry *e = NULL;
1504 unsigned long lo, hi, mid;
1506 /* do a binary search for right entry: */
1507 for (lo = 0, hi = table->length; lo < hi; ) {
1508 mid = (lo + hi) / 2;
1509 e = &table->array[mid];
1510 if (rel_ip < e->start_offset)
1512 else if (rel_ip >= e->end_offset)
1517 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1523 * Build an unwind script that unwinds from the state described by INFO to the
1524 * state that existed on entry to the function that contains INFO->ip.
1526 static inline struct unw_script *
1527 build_script (struct unw_frame_info *info)
1529 const struct unw_table_entry *e = NULL;
1530 struct unw_script *script = NULL;
1531 struct unw_labeled_state *ls, *next;
1532 unsigned long ip = info->ip;
1533 struct unw_state_record sr;
1534 struct unw_table *table;
1535 struct unw_reg_info *r;
1536 struct unw_insn insn;
1540 STAT(unsigned long start, parse_start;)
1542 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1544 /* build state record */
1545 memset(&sr, 0, sizeof(sr));
1546 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1547 r->when = UNW_WHEN_NEVER;
1548 sr.pr_val = info->pr;
1550 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1551 script = script_new(ip);
1553 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1554 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1557 unw.cache[info->prev_script].hint = script - unw.cache;
1559 /* search the kernel's and the modules' unwind tables for IP: */
1561 STAT(parse_start = ia64_get_itc());
1563 for (table = unw.tables; table; table = table->next) {
1564 if (ip >= table->start && ip < table->end) {
1565 e = lookup(table, ip - table->segment_base);
1570 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1571 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1572 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1573 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1574 sr.curr.reg[UNW_REG_RP].when = -1;
1575 sr.curr.reg[UNW_REG_RP].val = 0;
1576 compile_reg(&sr, UNW_REG_RP, script);
1577 script_finalize(script, &sr);
1578 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1579 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1583 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1585 hdr = *(u64 *) (table->segment_base + e->info_offset);
1586 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1587 desc_end = dp + 8*UNW_LENGTH(hdr);
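	/*
	 * Editorial note: the unwind info for a table entry starts with a
	 * 64-bit header word; UNW_LENGTH() extracts from it the number of
	 * 8-byte descriptor words that follow, and unw_decode() is run over
	 * exactly that range (or until the state record is complete).
	 */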
1589 while (!sr.done && dp < desc_end)
1590 dp = unw_decode(dp, sr.in_body, &sr);
1592 if (sr.when_target > sr.epilogue_start) {
1594 * sp has been restored and all values on the memory stack below
1595 * psp also have been restored.
1597 sr.curr.reg[UNW_REG_PSP].val = 0;
1598 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1599 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1600 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1601 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1602 || r->where == UNW_WHERE_SPREL)
1605 r->where = UNW_WHERE_NONE;
1606 r->when = UNW_WHEN_NEVER;
1610 script->flags = sr.flags;
1613 * If RP didn't get saved, generate an entry for the return link register.
1616 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1617 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1618 sr.curr.reg[UNW_REG_RP].when = -1;
1619 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1620 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1621 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1622 sr.curr.reg[UNW_REG_RP].val);
1626 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1627 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1628 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1629 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1630 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1632 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1633 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1634 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1635 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1636 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1637 case UNW_WHERE_NONE:
1638 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1642 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1645 UNW_DPRINT(1, "\t\t%d\n", r->when);
1650 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1652 /* translate state record into unwinder instructions: */
1655 * First, set psp if we're dealing with a fixed-size frame;
1656 * subsequent instructions may depend on this value.
1658 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1659 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1660 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1661 /* new psp is sp plus frame size */
1662 insn.opc = UNW_INSN_ADD;
1663 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1664 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1665 script_emit(script, insn);
1668 /* determine where the primary UNaT is: */
1669 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1670 i = UNW_REG_PRI_UNAT_MEM;
1671 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1672 i = UNW_REG_PRI_UNAT_GR;
1673 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1674 i = UNW_REG_PRI_UNAT_MEM;
1676 i = UNW_REG_PRI_UNAT_GR;
1678 compile_reg(&sr, i, script);
1680 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1681 compile_reg(&sr, i, script);
1683 /* free labeled register states & stack: */
1685 STAT(parse_start = ia64_get_itc());
1686 for (ls = sr.labeled_states; ls; ls = next) {
1688 free_state_stack(&ls->saved_state);
1689 free_labeled_state(ls);
1691 free_state_stack(&sr.curr);
1692 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1694 script_finalize(script, &sr);
1695 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1700 * Apply the unwinding actions represented by SCRIPT and update STATE to
1701 * reflect the state that existed upon entry to the function that this
1702 * script represents.
1705 run_script (struct unw_script *script, struct unw_frame_info *state)
1707 struct unw_insn *ip, *limit, next_insn;
1708 unsigned long opc, dst, val, off;
1709 unsigned long *s = (unsigned long *) state;
1710 STAT(unsigned long start;)
1712 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1713 state->flags = script->flags;
1715 limit = script->insn + script->count;
1718 while (ip++ < limit) {
1719 opc = next_insn.opc;
1720 dst = next_insn.dst;
1721 val = next_insn.val;
1730 case UNW_INSN_MOVE2:
1733 s[dst+1] = s[val+1];
1743 case UNW_INSN_MOVE_SCRATCH:
1745 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1748 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1749 __FUNCTION__, dst, val);
1753 case UNW_INSN_MOVE_CONST:
1755 s[dst] = (unsigned long) &unw.r0;
1758 UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1764 case UNW_INSN_MOVE_STACKED:
1765 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1769 case UNW_INSN_ADD_PSP:
1770 s[dst] = state->psp + val;
1773 case UNW_INSN_ADD_SP:
1774 s[dst] = state->sp + val;
1777 case UNW_INSN_SETNAT_MEMSTK:
1778 if (!state->pri_unat_loc)
1779 state->pri_unat_loc = &state->sw->caller_unat;
1780 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1781 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1784 case UNW_INSN_SETNAT_TYPE:
1790 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1791 || s[val] < TASK_SIZE)
1793 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1794 __FUNCTION__, s[val]);
1798 s[dst] = *(unsigned long *) s[val];
1802 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1806 off = unw.sw_off[val];
1807 s[val] = (unsigned long) state->sw + off;
1808 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1810 * We're initializing a general register: init NaT info, too. Note that
1811 * the offset is a multiple of 8, which gives us the 3 bits needed for the type field.
1814 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1819 find_save_locs (struct unw_frame_info *info)
1821 int have_write_lock = 0;
1822 struct unw_script *scr;
1823 unsigned long flags = 0;
1825 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1826 /* don't let obviously bad addresses pollute the cache */
1827 /* FIXME: should really be level 0 but it occurs too often. KAO */
1828 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1829 info->rp_loc = NULL;
1833 scr = script_lookup(info);
1835 spin_lock_irqsave(&unw.lock, flags);
1836 scr = build_script(info);
1838 spin_unlock_irqrestore(&unw.lock, flags);
1840 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1841 __FUNCTION__, info->ip);
1844 have_write_lock = 1;
1846 info->hint = scr->hint;
1847 info->prev_script = scr - unw.cache;
1849 run_script(scr, info);
1851 if (have_write_lock) {
1852 write_unlock(&scr->lock);
1853 spin_unlock_irqrestore(&unw.lock, flags);
1855 read_unlock(&scr->lock);
1860 unw_unwind (struct unw_frame_info *info)
1862 unsigned long prev_ip, prev_sp, prev_bsp;
1863 unsigned long ip, pr, num_regs;
1864 STAT(unsigned long start, flags;)
1867 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1871 prev_bsp = info->bsp;
1873 /* restore the ip */
1874 if (!info->rp_loc) {
1875 /* FIXME: should really be level 0 but it occurs too often. KAO */
1876 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1877 __FUNCTION__, info->ip);
1878 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1881 ip = info->ip = *info->rp_loc;
1882 if (ip < GATE_ADDR) {
1883 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1884 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1888 /* restore the cfm: */
1889 if (!info->pfs_loc) {
1890 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1891 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1894 info->cfm_loc = info->pfs_loc;
1896 /* restore the bsp: */
1899 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1900 info->pt = info->sp + 16;
1901 if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1902 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1904 (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1905 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1907 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1908 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1909 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1910 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1911 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1912 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1916 /* restore the sp: */
1917 info->sp = info->psp;
1918 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1919 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1920 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1921 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1925 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1926 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1928 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1932 /* as we unwind, the saved ar.unat becomes the primary unat: */
1933 info->pri_unat_loc = info->unat_loc;
1935 /* finally, restore the predicates: */
1936 unw_get_pr(info, &info->pr);
1938 retval = find_save_locs(info);
1939 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1942 EXPORT_SYMBOL(unw_unwind);
1945 unw_unwind_to_user (struct unw_frame_info *info)
1947 unsigned long ip, sp, pr = info->pr;
1950 unw_get_sp(info, &sp);
1951 if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1952 < IA64_PT_REGS_SIZE) {
1953 UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1957 if (unw_is_intr_frame(info) &&
1958 (pr & (1UL << PRED_USER_STACK)))
1960 if (unw_get_pr (info, &pr) < 0) {
1961 unw_get_rp(info, &ip);
1962 UNW_DPRINT(0, "unwind.%s: failed to read "
1963 "predicate register (ip=0x%lx)\n",
1967 } while (unw_unwind(info) >= 0);
1968 unw_get_ip(info, &ip);
1969 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1973 EXPORT_SYMBOL(unw_unwind_to_user);
1976 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
1977 struct switch_stack *sw, unsigned long stktop)
1979 unsigned long rbslimit, rbstop, stklimit;
1980 STAT(unsigned long start, flags;)
1982 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1985 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
1986 * don't want to do that because it would be slow as each preserved register would
1987 * have to be processed. Instead, what we do here is zero out the frame info and
1988 * start the unwind process at the function that created the switch_stack frame.
1989 * When a preserved value in switch_stack needs to be accessed, run_script() will
1990 * initialize the appropriate pointer on demand.
1992 memset(info, 0, sizeof(*info));
1994 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1995 rbstop = sw->ar_bspstore;
1996 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
1999 stklimit = (unsigned long) t + IA64_STK_OFFSET;
2000 if (stktop <= rbstop)
2003 info->regstk.limit = rbslimit;
2004 info->regstk.top = rbstop;
2005 info->memstk.limit = stklimit;
2006 info->memstk.top = stktop;
2009 info->sp = info->psp = stktop;
2011 UNW_DPRINT(3, "unwind.%s:\n"
2013 " rbs = [0x%lx-0x%lx)\n"
2014 " stk = [0x%lx-0x%lx)\n"
2018 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2019 info->pr, (unsigned long) info->sw, info->sp);
2020 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2024 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2028 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2029 info->cfm_loc = &sw->ar_pfs;
2030 sol = (*info->cfm_loc >> 7) & 0x7f;
2031 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
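	/*
	 * Editorial note: bits 7..13 of ar.pfs hold sol, the number of local
	 * registers in the previous frame; stepping bsp back over those
	 * registers (ia64_rse_skip_regs() accounts for intervening RNaT
	 * collection slots) yields the backing-store address of the frame
	 * described by this switch_stack.
	 */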
2033 UNW_DPRINT(3, "unwind.%s:\n"
2037 __FUNCTION__, info->bsp, sol, info->ip);
2038 find_save_locs(info);
2041 EXPORT_SYMBOL(unw_init_frame_info);
2044 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2046 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2048 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
2049 unw_init_frame_info(info, t, sw);
2051 EXPORT_SYMBOL(unw_init_from_blocked_task);
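
/*
 * Illustrative sketch (editorial addition): a typical in-kernel user of this
 * API walks a blocked task's call chain much the same way unw_unwind_to_user()
 * above walks to user level, e.g.:
 *
 *	struct unw_frame_info info;
 *	unsigned long ip;
 *
 *	unw_init_from_blocked_task(&info, task);
 *	do {
 *		unw_get_ip(&info, &ip);
 *		if (ip == 0)
 *			break;
 *		(record or print ip here)
 *	} while (unw_unwind(&info) >= 0);
 *
 * The stopping conditions are illustrative; callers pick whatever termination
 * test suits them.
 */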
2054 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2055 unsigned long gp, const void *table_start, const void *table_end)
2057 const struct unw_table_entry *start = table_start, *end = table_end;
2060 table->segment_base = segment_base;
2062 table->start = segment_base + start[0].start_offset;
2063 table->end = segment_base + end[-1].end_offset;
2064 table->array = start;
2065 table->length = end - start;
2069 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2070 const void *table_start, const void *table_end)
2072 const struct unw_table_entry *start = table_start, *end = table_end;
2073 struct unw_table *table;
2074 unsigned long flags;
2076 if (end - start <= 0) {
2077 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2082 table = kmalloc(sizeof(*table), GFP_USER);
2086 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2088 spin_lock_irqsave(&unw.lock, flags);
2090 /* keep kernel unwind table at the front (it's searched most commonly): */
2091 table->next = unw.tables->next;
2092 unw.tables->next = table;
2094 spin_unlock_irqrestore(&unw.lock, flags);
2100 unw_remove_unwind_table (void *handle)
2102 struct unw_table *table, *prev;
2103 struct unw_script *tmp;
2104 unsigned long flags;
2108 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2114 if (table == &unw.kernel_table) {
2115 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2116 "no-can-do!\n", __FUNCTION__);
2120 spin_lock_irqsave(&unw.lock, flags);
2122 /* first, delete the table: */
2124 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2125 if (prev->next == table)
2128 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2129 __FUNCTION__, (void *) table);
2130 spin_unlock_irqrestore(&unw.lock, flags);
2133 prev->next = table->next;
2135 spin_unlock_irqrestore(&unw.lock, flags);
2137 /* next, remove hash table entries for this table */
2139 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2140 tmp = unw.cache + unw.hash[index];
2141 if (unw.hash[index] >= UNW_CACHE_SIZE
2142 || tmp->ip < table->start || tmp->ip >= table->end)
2145 write_lock(&tmp->lock);
2147 if (tmp->ip >= table->start && tmp->ip < table->end) {
2148 unw.hash[index] = tmp->coll_chain;
2152 write_unlock(&tmp->lock);
2159 create_gate_table (void)
2161 const struct unw_table_entry *entry, *start, *end;
2162 unsigned long *lp, segbase = GATE_ADDR;
2163 size_t info_size, size;
2165 Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2168 for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2169 if (phdr->p_type == PT_IA_64_UNWIND) {
2175 printk(KERN_ERR "%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);
2179 start = (const struct unw_table_entry *) punw->p_vaddr;
2180 end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2183 unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2185 for (entry = start; entry < end; ++entry)
2186 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2187 size += 8; /* reserve space for "end of table" marker */
2189 unw.gate_table = kmalloc(size, GFP_KERNEL);
2190 if (!unw.gate_table) {
2191 unw.gate_table_size = 0;
2192 printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
2195 unw.gate_table_size = size;
2197 lp = unw.gate_table;
2198 info = (char *) unw.gate_table + size;
2200 for (entry = start; entry < end; ++entry, lp += 3) {
2201 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2203 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2205 lp[0] = segbase + entry->start_offset; /* start */
2206 lp[1] = segbase + entry->end_offset; /* end */
2207 lp[2] = info - (char *) unw.gate_table; /* info */
2209 *lp = 0; /* end-of-table marker */
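	/*
	 * Editorial note: the buffer built here is exactly what
	 * sys_getunwind() below hands to user space: three u64s per function
	 * (start address, end address, buffer-relative offset of its unwind
	 * info), the packed info blobs at the tail, and a terminating entry
	 * whose start address is zero.
	 */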
2213 __initcall(create_gate_table);
2219 extern void unw_hash_index_t_is_too_narrow (void);
2222 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2223 unw_hash_index_t_is_too_narrow();
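	/*
	 * Editorial note: the call above is a link-time assertion.  When the
	 * condition is false the compiler discards the call; when it is true,
	 * the reference to the deliberately undefined
	 * unw_hash_index_t_is_too_narrow() makes the build fail, flagging that
	 * unw_hash_index_t cannot index all UNW_HASH_SIZE hash buckets.
	 */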
2225 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2226 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2227 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2228 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2229 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2230 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2231 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2232 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2233 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2234 unw.sw_off[unw.preg_index[i]] = off;
2235 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2236 unw.sw_off[unw.preg_index[i]] = off;
2237 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2238 unw.sw_off[unw.preg_index[i]] = off;
2239 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2240 unw.sw_off[unw.preg_index[i]] = off;
2242 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2244 unw.cache[i].lru_chain = (i - 1);
2245 unw.cache[i].coll_chain = -1;
2246 rwlock_init(&unw.cache[i].lock);
2248 unw.lru_head = UNW_CACHE_SIZE - 1;
2251 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2252 __start_unwind, __end_unwind);
2256 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2258 * This system call has been deprecated. The new and improved way to get
2259 * at the kernel's unwind info is via the gate DSO. The address of the
2260 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2262 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2264 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2265 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2266 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the unwind data.
2269 * The first portion of the unwind data contains an unwind table and the rest contains the
2270 * associated unwind info (in no particular order). The unwind table consists of a table
2271 * of entries of the form:
2273 * u64 start; (64-bit address of start of function)
2274 * u64 end; (64-bit address of end of function)
2275 * u64 info; (BUF-relative offset to unwind info)
2277 * The end of the unwind table is indicated by an entry with a START address of zero.
2279 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2280 * on the format of the unwind info.
2283 * EFAULT BUF points outside your accessible address space.
2286 sys_getunwind (void __user *buf, size_t buf_size)
2288 if (buf && buf_size >= unw.gate_table_size)
2289 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2291 return unw.gate_table_size;
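
/*
 * Illustrative user-space sketch (editorial addition): the deprecated call is
 * typically used in two steps: query the size with a NULL buffer, then copy.
 * This assumes an ia64 toolchain where __NR_getunwind is visible via
 * <sys/syscall.h>, and parse_unwind_table() is a hypothetical consumer:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	long size = syscall(__NR_getunwind, NULL, 0);
 *	void *buf = (size > 0) ? malloc(size) : NULL;
 *	if (buf && syscall(__NR_getunwind, buf, size) == size)
 *		parse_unwind_table(buf, size);
 *
 * New code should instead locate the gate DSO via AT_SYSINFO_EHDR, as noted
 * above.
 */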