/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */
/* relocs tested so far:

   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0	/* Itanium 1 does not implement brl; see brl_emu.c */
#else
# define USE_BRL	1
#endif
#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */
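/*
 * Note: the 22-bit "add" immediate used for @gprel/@ltoff addressing is
 * signed, so a linkage table of MAX_LTOFF bytes is reachable only if gp
 * points at its middle; apply_relocate_add() below places gp accordingly.
 */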
/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
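/*
 * An IA-64 ELF reloc type number encodes the target format in its low
 * three bits and the value formula in the next five; do_reloc() below
 * splits r_type the same way:
 *
 *	format  = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
 *	formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
 */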
enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,	RF_INSN14 = 1,	RF_INSN22 = 2,	RF_INSN64 = 3,
	RF_32MSB = 4,	RF_32LSB = 5,	RF_64MSB = 6,	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};
enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,	RV_RSVD25 = 25,	RV_RSVD26 = 26,	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes. */
};
#define N(reloc)	[R_IA64_##reloc] = #reloc
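/* e.g., N(NONE) expands to: [R_IA64_NONE] = "NONE" */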
static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N
/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	/* The slot number is encoded in the two low-order pointer bits. */
	return (uint64_t) insn & 0x3;
}
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}
static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
		       mod->name, (int64_t) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}
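/*
 * The range check in apply_imm60() above and in apply_imm22()/apply_imm21b()
 * below uses the usual biasing trick: for a signed n-bit immediate,
 * "val + (1 << (n-1)) >= (1 << n)", evaluated unsigned, rejects exactly
 * the values outside [-2^(n-1), 2^(n-1)).
 */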
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
		       mod->name, (int64_t) val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
		       mod->name, (int64_t) val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
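/*
 * ia64_patch() (arch/ia64/kernel/patch.c) merges the given bits into the
 * instruction slot under the given mask, so the shift cascades above simply
 * scatter the immediate into the fields the architecture defines for it.
 */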
#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}
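/*
 * The brl displacement is counted in 16-byte bundles, hence the division
 * by 16 above and the bundle-count argument passed to apply_imm60().
 */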
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) <<  0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}
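/*
 * plt_target() inverts patch_plt(): it reassembles the branch displacement
 * from the bundle so that get_plt() can detect an existing entry for the
 * same target (and, with ARCH_MODULE_DEBUG, verify a freshly patched one).
 */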
#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
void *
module_alloc (unsigned long size)
{
	if (!size)
		return NULL;
	return vmalloc(size);
}
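/*
 * Note that module_free() may run for the init region alone: the init text
 * is discarded after loading, so the unwind table covering it must be
 * unregistered before the memory goes away.
 */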
void
module_free (struct module *mod, void *module_region)
{
	if (mod && mod->arch.init_unw_table &&
	    module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}
/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}
/* Count how many GOT entries we may need. */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_LTOFF22:
		case R_IA64_LTOFF22X:
		case R_IA64_LTOFF64I:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* Count how many PLT entries we may need. */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_PCREL21B:
		case R_IA64_PLTOFF22:
		case R_IA64_PLTOFF64I:
		case R_IA64_PLTOFF64MSB:
		case R_IA64_PLTOFF64LSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_FPTR64I:
		case R_IA64_FPTR32LSB:
		case R_IA64_FPTR32MSB:
		case R_IA64_FPTR64LSB:
		case R_IA64_FPTR64MSB:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR64LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
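/*
 * Marking the four sections SHT_NOBITS with a freshly computed sh_size is
 * what makes the generic module loader reserve room for them in the module
 * image even though they occupy no space in the object file.
 */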
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
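/*
 * The unsigned subtraction idiom above ("addr - base < size") is a
 * single-comparison range check: if addr < base, the subtraction wraps to
 * a huge value and the test fails, so no second comparison is needed.
 */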
/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
		BUG();

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
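/*
 * Equivalent to gp - MAX_LTOFF/2 <= value < gp + MAX_LTOFF/2, i.e., the
 * symmetric 4MB window the signed 22-bit add immediate can reach around gp.
 */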
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create a new one: */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
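/*
 * An fdesc is the usual IA-64 {ip, gp} pair: indirect calls load both the
 * entry point and the callee's gp from it, which is why every module-local
 * function whose address escapes needs one in .opd.
 */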
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	case RV_DIRECT:
		break;

	case RV_GPREL:		val -= mod->arch.gp; break;
	case RV_LTREL:		val = get_ltoff(mod, val, &ok); break;
	case RV_PLTREL:		val = get_plt(mod, location, val, &ok); break;
	case RV_FPTR:		val = get_fdesc(mod, val, &ok); break;
	case RV_SECREL:		val -= sec->sh_addr; break;
	case RV_LTREL_FPTR:	val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	case RV_PCREL:
		switch (r_type) {
		case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core,
				 * if the branch won't reach, then allocate a plt for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		default:
			/* Branch displacements are relative to the bundle address. */
			val -= bundle(location);
			break;

		case R_IA64_PCREL32MSB:
		case R_IA64_PCREL32LSB:
		case R_IA64_PCREL64MSB:
		case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;
		}
		switch (r_type) {
		case R_IA64_PCREL60B: format = RF_INSN60; break;
		case R_IA64_PCREL21B: format = RF_INSN21B; break;
		case R_IA64_PCREL21M: format = RF_INSN21M; break;
		case R_IA64_PCREL21F: format = RF_INSN21F; break;
		default: break;
		}
		break;

	case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
				       __func__, reloc_name[r_type], val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	case RV_SPECIAL:
		switch (r_type) {
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	case RV_TPREL:
	case RV_LTREL_TPREL:
	case RV_DTPMOD:
	case RV_LTREL_DTPMOD:
	case RV_DTPREL:
	case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}
	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);
	switch (format) {
	case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	case RF_INSN22:		ok = apply_imm22(mod, location, val); break;
	case RF_INSN64:		ok = apply_imm64(mod, location, val); break;
	case RF_INSN60:		ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	case RF_32LSB:		put_unaligned(val, (uint32_t *) location); break;
	case RF_64LSB:		put_unaligned(val, (uint64_t *) location); break;
	case RF_32MSB:		/* ia64 Linux is little-endian... */
	case RF_64MSB:		/* ia64 Linux is little-endian... */
	case RF_INSN14:		/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_size > MAX_LTOFF)
			/*
			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
			 * at the end of the module.
			 */
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}
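	/*
	 * Worked example: with a 6MB core (> MAX_LTOFF), gp lands at
	 * core + 4MB, so the small-data sections packed at the end of the
	 * module all fall inside gp's +/-2MB window; a 1MB core simply gets
	 * gp at its midpoint.
	 */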
	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		unsigned int relsec, struct module *mod)
{
	printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
	return -ENOEXEC;
}
/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;

	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}

	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}
	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
	return 0;
}
void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}
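/*
 * Used, e.g., by printk's %pF handling: given a potential function-descriptor
 * pointer, return the underlying code address.  probe_kernel_address()
 * performs the read with fault handling (returning 0 on success), so a bad
 * pointer leaves "ptr" unchanged instead of oopsing.
 */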
void *dereference_function_descriptor(void *ptr)
{
	struct fdesc *desc = ptr;
	void *p;

	if (!probe_kernel_address(&desc->ip, p))
		ptr = p;
	return ptr;
}