/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */
/* relocs tested so far:

   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
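
/*
 * Note: the R_IA64_* relocation numbers themselves encode how a relocation
 * is applied: the low 3 bits name the target format (where/how the value
 * gets stored) and the next 5 bits name the value formula (how the value is
 * computed).  For example, R_IA64_GPREL22 (0x2a) decodes to formula
 * 0x2a >> 3 = 5 (RV_GPREL) and format 0x2a & 7 = 2 (RF_INSN22), which is
 * exactly how do_reloc() below dispatches.
 */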

enum reloc_target_format {
        /* direct encoded formats: */
        RF_NONE = 0,
        RF_INSN14 = 1,
        RF_INSN22 = 2,
        RF_INSN64 = 3,
        RF_32MSB = 4,
        RF_32LSB = 5,
        RF_64MSB = 6,
        RF_64LSB = 7,

        /* formats that cannot be directly decoded: */
        RF_INSN60,
        RF_INSN21B,     /* imm21 form 1 */
        RF_INSN21M,     /* imm21 form 2 */
        RF_INSN21F      /* imm21 form 3 */
};

enum reloc_value_formula {
        RV_DIRECT = 4,          /* S + A */
        RV_GPREL = 5,           /* @gprel(S + A) */
        RV_LTREL = 6,           /* @ltoff(S + A) */
        RV_PLTREL = 7,          /* @pltoff(S + A) */
        RV_FPTR = 8,            /* @fptr(S + A) */
        RV_PCREL = 9,           /* S + A - P */
        RV_LTREL_FPTR = 10,     /* @ltoff(@fptr(S + A)) */
        RV_SEGREL = 11,         /* @segrel(S + A) */
        RV_SECREL = 12,         /* @secrel(S + A) */
        RV_BDREL = 13,          /* BD + A */
        RV_LTV = 14,            /* S + A (like RV_DIRECT, except frozen at static link-time) */
        RV_PCREL2 = 15,         /* S + A - P */
        RV_SPECIAL = 16,        /* various (see below) */
        RV_TPREL = 18,          /* @tprel(S + A) */
        RV_LTREL_TPREL = 19,    /* @ltoff(@tprel(S + A)) */
        RV_DTPMOD = 20,         /* @dtpmod(S + A) */
        RV_LTREL_DTPMOD = 21,   /* @ltoff(@dtpmod(S + A)) */
        RV_DTPREL = 22,         /* @dtprel(S + A) */
        RV_LTREL_DTPREL = 23,   /* @ltoff(@dtprel(S + A)) */
        /* 28-31 reserved for implementation-specific purposes. */
};
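
/*
 * Notation in the formula comments above follows the usual ELF relocation
 * conventions: S is the symbol's value, A the addend, and P the address of
 * the location being relocated.  BD is the load base of the region holding
 * that location (module_init or module_core here, per the RV_BDREL case in
 * do_reloc() below).
 */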

#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
        N(NONE), N(IMM14), N(IMM22), N(IMM64),
        N(DIR32MSB), N(DIR32LSB), N(DIR64MSB), N(DIR64LSB),
        N(GPREL22), N(GPREL64I), N(GPREL32MSB), N(GPREL32LSB),
        N(GPREL64MSB), N(GPREL64LSB), N(LTOFF22), N(LTOFF64I),
        N(PLTOFF22), N(PLTOFF64I), N(PLTOFF64MSB), N(PLTOFF64LSB),
        N(FPTR64I), N(FPTR32MSB), N(FPTR32LSB), N(FPTR64MSB),
        N(FPTR64LSB), N(PCREL60B), N(PCREL21B), N(PCREL21M),
        N(PCREL21F), N(PCREL32MSB), N(PCREL32LSB), N(PCREL64MSB),
        N(PCREL64LSB), N(LTOFF_FPTR22), N(LTOFF_FPTR64I), N(LTOFF_FPTR32MSB),
        N(LTOFF_FPTR32LSB), N(LTOFF_FPTR64MSB), N(LTOFF_FPTR64LSB), N(SEGREL32MSB),
        N(SEGREL32LSB), N(SEGREL64MSB), N(SEGREL64LSB), N(SECREL32MSB),
        N(SECREL32LSB), N(SECREL64MSB), N(SECREL64LSB), N(REL32MSB),
        N(REL32LSB), N(REL64MSB), N(REL64LSB), N(LTV32MSB),
        N(LTV32LSB), N(LTV64MSB), N(LTV64LSB), N(PCREL21BI),
        N(PCREL22), N(PCREL64I), N(IPLTMSB), N(IPLTLSB),
        N(COPY), N(LTOFF22X), N(LDXMOV), N(TPREL14),
        N(TPREL22), N(TPREL64I), N(TPREL64MSB), N(TPREL64LSB),
        N(LTOFF_TPREL22), N(DTPMOD64MSB), N(DTPMOD64LSB), N(LTOFF_DTPMOD22),
        N(DTPREL14), N(DTPREL22), N(DTPREL64I), N(DTPREL32MSB),
        N(DTPREL32LSB), N(DTPREL64MSB), N(DTPREL64LSB), N(LTOFF_DTPREL22)
};

#undef N

/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
        return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
        return (uint64_t) insn & 0x3;
}
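
/*
 * An IA-64 "bundle" is 16 bytes holding three instruction slots, so code
 * addresses are 16-byte aligned and the low bits of a pointer are free.
 * The convention used here (and by the ELF relocations) is to encode the
 * slot number (0-2) in those low bits, e.g. bundle_address + 2 names slot 2
 * of that bundle; bundle() and slot() above just split such a pointer apart.
 */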

static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
        if (slot(insn) != 2) {
                printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
                       mod->name, slot(insn));
                return 0;
        }
        ia64_patch_imm64((u64) insn, val);
        return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
        if (slot(insn) != 2) {
                printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
                       mod->name, slot(insn));
                return 0;
        }
        if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
                printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
                       mod->name, (int64_t) val);
                return 0;
        }
        ia64_patch_imm60((u64) insn, val);
        return 1;
}
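
/*
 * The range checks above and below use a common unsigned-overflow idiom:
 * for an n-bit signed immediate, "val + (1 << (n-1)) >= (1 << n)" rejects
 * exactly the values outside [-2^(n-1), 2^(n-1)), without needing a signed
 * comparison.
 */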

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
        if (val + (1 << 21) >= (1 << 22)) {
                printk(KERN_ERR "%s: value %li out of IMM22 range\n",
                       mod->name, (int64_t) val);
                return 0;
        }
        ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                                                 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                                                 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                                                 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
        return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
        if (val + (1 << 20) >= (1 << 21)) {
                printk(KERN_ERR "%s: value %li out of IMM21b range\n",
                       mod->name, (int64_t) val);
                return 0;
        }
        ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                                                | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
        return 1;
}

#if USE_BRL

struct plt_entry {
        /* Two instruction bundles in PLT. */
        unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
        {
                {
                        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*       movl gp=TARGET_GP */
                        0x00, 0x00, 0x00, 0x60
                },
                {
                        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*       brl.many tgt=TARGET_IP */
                        0x08, 0x00, 0x00, 0xc0
                }
        }
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
        if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
            && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
                           (target_ip - (int64_t) plt->bundle[1]) / 16))
                return 1;
        return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
        uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
        long off;

        b0 = b[0]; b1 = b[1];
        off = (  ((b1 & 0x00fffff000000000UL) >> 36)            /* imm20b -> bit 0 */
               | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36) /* imm39 -> bit 20 */
               | ((b1 & 0x0800000000000000UL) << 0));           /* i -> bit 59 */
        return (long) plt->bundle[1] + 16*off;
}
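
/*
 * With brl available, a PLT entry is just "movl gp=<gp>; brl <target>".
 * brl takes a 60-bit, bundle-granular IP-relative displacement, so a single
 * entry can reach any target address.  Note that plt_target() recovers the
 * branch target by re-assembling the imm60 fields straight from the
 * instruction bytes, so no separate record of PLT targets needs to be kept.
 */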

#else /* !USE_BRL */

struct plt_entry {
        /* Three instruction bundles in PLT. */
        unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
        {
                {
                        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*       movl r16=TARGET_IP */
                        0x02, 0x00, 0x00, 0x60
                },
                {
                        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*       movl gp=TARGET_GP */
                        0x00, 0x00, 0x00, 0x60
                },
                {
                        0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
                        0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*       mov b6=r16 */
                        0x60, 0x00, 0x80, 0x00              /*       br.few b6 */
                }
        }
};
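
/*
 * This variant exists because the original Itanium (CONFIG_ITANIUM) does
 * not implement brl.  Instead, the entry loads the 64-bit target into r16
 * with movl, moves it into branch register b6, and branches indirectly.
 * That costs one extra bundle per entry but works on every CPU model.
 */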

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
        if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
            && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
                return 1;
        return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
        uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

        b0 = b[0]; b1 = b[1];
        return (  ((b1 & 0x000007f000000000) >> 36)             /* imm7b -> bit 0 */
                | ((b1 & 0x07fc000000000000) >> 43)             /* imm9d -> bit 7 */
                | ((b1 & 0x0003e00000000000) >> 29)             /* imm5c -> bit 16 */
                | ((b1 & 0x0000100000000000) >> 23)             /* ic -> bit 21 */
                | ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)  /* imm41 -> bit 22 */
                | ((b1 & 0x0800000000000000) <<  4));           /* i -> bit 63 */
}

#endif /* !USE_BRL */

void *
module_alloc (unsigned long size)
{
        if (!size)
                return NULL;
        return vmalloc(size);
}
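
/*
 * Note: the generic module loader frees the init region through
 * module_free() once initialization is done, so the init unwind table
 * (which describes that text) must be unregistered here, before the
 * memory disappears.
 */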
void
module_free (struct module *mod, void *module_region)
{
        if (mod && mod->arch.init_unw_table &&
            module_region == mod->module_init) {
                unw_remove_unwind_table(mod->arch.init_unw_table);
                mod->arch.init_unw_table = NULL;
        }
        vfree(module_region);
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
        unsigned int i;

        for (i = 0; i < num; i++) {
                if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
                        return 1;
        }
        return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
        unsigned int i, ret = 0;

        /* Sure, this is order(n^2), but it's usually short, and not
           time critical. */
        for (i = 0; i < num; i++) {
                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_IA64_LTOFF22:
                case R_IA64_LTOFF22X:
                case R_IA64_LTOFF64I:
                case R_IA64_LTOFF_FPTR22:
                case R_IA64_LTOFF_FPTR64I:
                case R_IA64_LTOFF_FPTR32MSB:
                case R_IA64_LTOFF_FPTR32LSB:
                case R_IA64_LTOFF_FPTR64MSB:
                case R_IA64_LTOFF_FPTR64LSB:
                        if (!duplicate_reloc(rela, i))
                                ret++;
                        break;
                }
        }
        return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
        unsigned int i, ret = 0;

        /* Sure, this is order(n^2), but it's usually short, and not
           time critical. */
        for (i = 0; i < num; i++) {
                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_IA64_PCREL21B:
                case R_IA64_PLTOFF22:
                case R_IA64_PLTOFF64I:
                case R_IA64_PLTOFF64MSB:
                case R_IA64_PLTOFF64LSB:
                case R_IA64_IPLTMSB:
                case R_IA64_IPLTLSB:
                        if (!duplicate_reloc(rela, i))
                                ret++;
                        break;
                }
        }
        return ret;
}

/* We need to create a function descriptor for each internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
        unsigned int i, ret = 0;

        /* Sure, this is order(n^2), but it's usually short, and not time critical. */
        for (i = 0; i < num; i++) {
                switch (ELF64_R_TYPE(rela[i].r_info)) {
                case R_IA64_FPTR64I:
                case R_IA64_FPTR32LSB:
                case R_IA64_FPTR32MSB:
                case R_IA64_FPTR64LSB:
                case R_IA64_FPTR64MSB:
                case R_IA64_LTOFF_FPTR22:
                case R_IA64_LTOFF_FPTR32LSB:
                case R_IA64_LTOFF_FPTR32MSB:
                case R_IA64_LTOFF_FPTR64I:
                case R_IA64_LTOFF_FPTR64LSB:
                case R_IA64_LTOFF_FPTR64MSB:
                        /*
                         * Jumps to static functions sometimes go straight to their
                         * offset.  Of course, that may not be possible if the jump is
                         * from init -> core or vice versa, so we need to generate an
                         * FDESC (and PLT etc) for that.
                         */
                case R_IA64_PCREL21B:
                        if (!duplicate_reloc(rela, i))
                                ret++;
                        break;
                }
        }
        return ret;
}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
                           struct module *mod)
{
        unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
        Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

        /*
         * To store the PLTs and function-descriptors, we expand the .text section for
         * core module-code and the .init.text section for initialization code.
         */
        for (s = sechdrs; s < sechdrs_end; ++s)
                if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
                        mod->arch.core_plt = s;
                else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
                        mod->arch.init_plt = s;
                else if (strcmp(".got", secstrings + s->sh_name) == 0)
                        mod->arch.got = s;
                else if (strcmp(".opd", secstrings + s->sh_name) == 0)
                        mod->arch.opd = s;
                else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
                        mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
                else if (strcmp(".paravirt_bundles",
                                secstrings + s->sh_name) == 0)
                        mod->arch.paravirt_bundles = s;
                else if (strcmp(".paravirt_insts",
                                secstrings + s->sh_name) == 0)
                        mod->arch.paravirt_insts = s;
#endif

        if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
                printk(KERN_ERR "%s: sections missing\n", mod->name);
                return -ENOEXEC;
        }

        /* GOT and PLTs can occur in any relocated section... */
        for (s = sechdrs + 1; s < sechdrs_end; ++s) {
                const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
                unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

                if (s->sh_type != SHT_RELA)
                        continue;

                gots += count_gots(rels, numrels);
                fdescs += count_fdescs(rels, numrels);
                if (strstr(secstrings + s->sh_name, ".init"))
                        init_plts += count_plts(rels, numrels);
                else
                        core_plts += count_plts(rels, numrels);
        }

        mod->arch.core_plt->sh_type = SHT_NOBITS;
        mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.core_plt->sh_addralign = 16;
        mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
        mod->arch.init_plt->sh_type = SHT_NOBITS;
        mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
        mod->arch.init_plt->sh_addralign = 16;
        mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
        mod->arch.got->sh_type = SHT_NOBITS;
        mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
        mod->arch.got->sh_addralign = 8;
        mod->arch.got->sh_size = gots * sizeof(struct got_entry);
        mod->arch.opd->sh_type = SHT_NOBITS;
        mod->arch.opd->sh_flags = SHF_ALLOC;
        mod->arch.opd->sh_addralign = 8;
        mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
        DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
               __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
               mod->arch.got->sh_size, mod->arch.opd->sh_size);
        return 0;
}
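
/*
 * The section sizes set above are worst-case estimates: count_gots() etc.
 * count distinct relocations that *may* need an entry.  Marking the
 * sections SHT_NOBITS keeps them out of the object file; the generic
 * module loader still reserves that much zeroed memory for them, and
 * get_ltoff()/get_plt()/get_fdesc() below fill entries in lazily during
 * relocation.
 */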

static inline int
in_init (const struct module *mod, uint64_t addr)
{
        return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
        return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
        return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
        struct got_entry *got, *e;

        if (!*okp)
                return 0;

        got = (void *) mod->arch.got->sh_addr;
        for (e = got; e < got + mod->arch.next_got_entry; ++e)
                if (e->val == value)
                        goto found;

        /* Not enough GOT entries? */
        BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

        e->val = value;
        ++mod->arch.next_got_entry;
  found:
        return (uint64_t) e - mod->arch.gp;
}

static inline int
gp_addressable (struct module *mod, uint64_t value)
{
        return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
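
/*
 * The unsigned comparison above is equivalent to
 * gp - MAX_LTOFF/2 <= value < gp + MAX_LTOFF/2, i.e. it checks whether
 * "value" lies within the 4MB window around gp that a 22-bit signed
 * add-immediate (addl) can reach; that in turn decides whether a GOT
 * entry is needed at all.
 */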

/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
        struct plt_entry *plt, *plt_end;
        uint64_t target_ip, target_gp;

        if (!*okp)
                return 0;

        if (in_init(mod, (uint64_t) insn)) {
                plt = (void *) mod->arch.init_plt->sh_addr;
                plt_end = (void *) plt + mod->arch.init_plt->sh_size;
        } else {
                plt = (void *) mod->arch.core_plt->sh_addr;
                plt_end = (void *) plt + mod->arch.core_plt->sh_size;
        }

        /* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
        target_ip = ((uint64_t *) value)[0];
        target_gp = ((uint64_t *) value)[1];

        /* Look for existing PLT entry. */
        while (plt->bundle[0][0]) {
                if (plt_target(plt) == target_ip)
                        goto found;
                if (++plt >= plt_end)
                        BUG();
        }
        *plt = ia64_plt_template;
        if (!patch_plt(mod, plt, target_ip, target_gp)) {
                *okp = 0;
                return 0;
        }
#if ARCH_MODULE_DEBUG
        if (plt_target(plt) != target_ip) {
                printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
                       __func__, target_ip, plt_target(plt));
                *okp = 0;
                return 0;
        }
#endif
  found:
        return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
        struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

        if (!*okp)
                return 0;

        if (!value) {
                printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
                return 0;
        }

        if (!is_internal(mod, value))
                /*
                 * If it's not a module-local entry-point, "value" already points to a
                 * function-descriptor.
                 */
                return value;

        /* Look for existing function descriptor. */
        while (fdesc->ip) {
                if (fdesc->ip == value)
                        return (uint64_t) fdesc;
                if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
                        BUG();
        }

        /* Create new one */
        fdesc->ip = value;
        fdesc->gp = mod->arch.gp;
        return (uint64_t) fdesc;
}
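
/*
 * On IA-64, a function pointer does not point at code: it points at a
 * 16-byte function descriptor holding the entry address (ip) and the gp
 * value the callee expects; an indirect call loads both.  The .opd section
 * reserved in module_frob_arch_sections() is the pool these descriptors
 * are carved from, and every descriptor created here shares the module's gp.
 */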

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
          Elf64_Shdr *sec, void *location)
{
        enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
        enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
        uint64_t val;
        int ok = 1;

        val = sym->st_value + addend;

        switch (formula) {
        case RV_SEGREL: /* segment base is arbitrarily chosen to be 0 for kernel modules */
        case RV_DIRECT:
                break;

        case RV_GPREL:      val -= mod->arch.gp; break;
        case RV_LTREL:      val = get_ltoff(mod, val, &ok); break;
        case RV_PLTREL:     val = get_plt(mod, location, val, &ok); break;
        case RV_FPTR:       val = get_fdesc(mod, val, &ok); break;
        case RV_SECREL:     val -= sec->sh_addr; break;
        case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

        case RV_PCREL:
                switch (r_type) {
                case R_IA64_PCREL21B:
                        if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
                            (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
                                /*
                                 * Init section may have been allocated far away from core,
                                 * if the branch won't reach, then allocate a plt for it.
                                 */
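                                /*
                                 * (A PCREL21B displacement is a 21-bit signed count of
                                 * 16-byte bundles, so a direct br.call reaches only about
                                 * +/-16MB.  Init and core live in separate vmalloc areas
                                 * and can easily end up farther apart than that.)
                                 */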
                                uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
                                if (delta + (1 << 20) >= (1 << 21)) {
                                        val = get_fdesc(mod, val, &ok);
                                        val = get_plt(mod, location, val, &ok);
                                }
                        } else if (!is_internal(mod, val))
                                val = get_plt(mod, location, val, &ok);
                        /* FALL THROUGH */
                default:
                        val -= bundle(location);
                        break;

                case R_IA64_PCREL32MSB:
                case R_IA64_PCREL32LSB:
                case R_IA64_PCREL64MSB:
                case R_IA64_PCREL64LSB:
                        val -= (uint64_t) location;
                        break;
                }
                switch (r_type) {
                case R_IA64_PCREL60B: format = RF_INSN60; break;
                case R_IA64_PCREL21B: format = RF_INSN21B; break;
                case R_IA64_PCREL21M: format = RF_INSN21M; break;
                case R_IA64_PCREL21F: format = RF_INSN21F; break;
                default: break;
                }
                break;

        case RV_BDREL:
                val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
                break;

        case RV_LTV:
                /* can link-time value relocs happen here? */
                BUG();
                break;

        case RV_PCREL2:
                if (r_type == R_IA64_PCREL21BI) {
                        if (!is_internal(mod, val)) {
                                printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
                                       __func__, reloc_name[r_type], val);
                                return -ENOEXEC;
                        }
                        format = RF_INSN21B;
                }
                val -= bundle(location);
                break;

        case RV_SPECIAL:
                switch (r_type) {
                case R_IA64_IPLTMSB:
                case R_IA64_IPLTLSB:
                        val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
                        format = RF_64LSB;
                        if (r_type == R_IA64_IPLTMSB)
                                format = RF_64MSB;
                        break;

                case R_IA64_SUB:
                        val = addend - sym->st_value;
                        format = RF_INSN64;
                        break;

                case R_IA64_LTOFF22X:
                        if (gp_addressable(mod, val))
                                val -= mod->arch.gp;
                        else
                                val = get_ltoff(mod, val, &ok);
                        format = RF_INSN22;
                        break;

                case R_IA64_LDXMOV:
                        if (gp_addressable(mod, val)) {
                                /* turn "ld8" into "mov": */
                                DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
                                ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
                        }
                        return 0;
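
                        /*
                         * LDXMOV pairs with LTOFF22X above: if the value turned
                         * out to be directly gp-addressable, the addl computed
                         * the final address itself and the ld8 that would have
                         * fetched it from the GOT is rewritten into a register
                         * move; otherwise the ld8 is left alone and loads the
                         * address from the GOT entry.
                         */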

                default:
                        if (reloc_name[r_type])
                                printk(KERN_ERR "%s: special reloc %s not supported\n",
                                       mod->name, reloc_name[r_type]);
                        else
                                printk(KERN_ERR "%s: unknown special reloc %x\n",
                                       mod->name, r_type);
                        return -ENOEXEC;
                }
                break;

        case RV_TPREL:
        case RV_LTREL_TPREL:
        case RV_DTPMOD:
        case RV_LTREL_DTPMOD:
        case RV_DTPREL:
        case RV_LTREL_DTPREL:
                printk(KERN_ERR "%s: %s reloc not supported\n",
                       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
                return -ENOEXEC;

        default:
                printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
                return -ENOEXEC;
        }

        if (!ok)
                return -ENOEXEC;

        DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
               reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

        switch (format) {
        case RF_INSN21B: ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
        case RF_INSN22:  ok = apply_imm22(mod, location, val); break;
        case RF_INSN64:  ok = apply_imm64(mod, location, val); break;
        case RF_INSN60:  ok = apply_imm60(mod, location, (int64_t) val / 16); break;
        case RF_32LSB:   put_unaligned(val, (uint32_t *) location); break;
        case RF_64LSB:   put_unaligned(val, (uint64_t *) location); break;
        case RF_32MSB:   /* ia64 Linux is little-endian... */
        case RF_64MSB:   /* ia64 Linux is little-endian... */
        case RF_INSN14:  /* must be within-module, i.e., resolved by "ld -r" */
        case RF_INSN21M: /* must be within-module, i.e., resolved by "ld -r" */
        case RF_INSN21F: /* must be within-module, i.e., resolved by "ld -r" */
                printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
                       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
                return -ENOEXEC;

        default:
                printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
                       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
                return -ENOEXEC;
        }
        return ok ? 0 : -ENOEXEC;
}

int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
                    unsigned int relsec, struct module *mod)
{
        unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
        Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
        Elf64_Shdr *target_sec;
        int ret;

        DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
               relsec, n, sechdrs[relsec].sh_info);

        target_sec = sechdrs + sechdrs[relsec].sh_info;

        if (target_sec->sh_entsize == ~0UL)
                /*
                 * If target section wasn't allocated, we don't need to relocate it.
                 * Happens, e.g., for debug sections.
                 */
                return 0;

        if (!mod->arch.gp) {
                /*
                 * XXX Should have an arch-hook for running this after final section
                 *     addresses have been selected...
                 */
                uint64_t gp;
                if (mod->core_size > MAX_LTOFF)
                        /*
                         * This takes advantage of the fact that ARCH_SHF_SMALL sections
                         * get allocated at the end of the module.
                         */
                        gp = mod->core_size - MAX_LTOFF / 2;
                else
                        gp = mod->core_size / 2;
                gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
                mod->arch.gp = gp;
                DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
        }
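
        /*
         * In other words: gp is placed so that its +/-2MB (MAX_LTOFF/2)
         * window covers as much of the module as possible.  E.g. for a 1MB
         * module, gp sits at the 512KB mark and the window spans the whole
         * image; for a module larger than 4MB, gp is pinned 2MB below the
         * end so the window at least covers the small-data sections (.got
         * is marked ARCH_SHF_SMALL and is allocated toward the end).
         */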

        for (i = 0; i < n; i++) {
                ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
                               ((Elf64_Sym *) sechdrs[symindex].sh_addr
                                + ELF64_R_SYM(rela[i].r_info)),
                               rela[i].r_addend, target_sec,
                               (void *) target_sec->sh_addr + rela[i].r_offset);
                if (ret < 0)
                        return ret;
        }
        return 0;
}

int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
                unsigned int relsec, struct module *mod)
{
        printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
        return -ENOEXEC;
}

/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
        struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
        struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
        struct unw_table_entry tmp, *e1, *e2, *core, *init;
        unsigned long num_init = 0, num_core = 0;

        /* First, count how many init and core unwind-table entries there are. */
        for (e1 = start; e1 < end; ++e1)
                if (in_init(mod, e1->start_offset))
                        ++num_init;
                else
                        ++num_core;

        /*
         * Second, sort the table such that all unwind-table entries for the init and core
         * text sections are nicely separated.  We do this with a stupid bubble sort
         * (unwind tables don't get ridiculously huge).
         */
        for (e1 = start; e1 < end; ++e1) {
                for (e2 = e1 + 1; e2 < end; ++e2) {
                        if (e2->start_offset < e1->start_offset) {
                                tmp = *e1;
                                *e1 = *e2;
                                *e2 = tmp;
                        }
                }
        }

        /*
         * Third, locate the init and core segments in the unwind table:
         */
        if (in_init(mod, start->start_offset)) {
                init = start;
                core = start + num_init;
        } else {
                core = start;
                init = start + num_core;
        }

        DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
               mod->name, mod->arch.gp, num_init, num_core);

        /*
         * Fourth, register both tables (if not empty).
         */
        if (num_core > 0) {
                mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
                                                                core, core + num_core);
                DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
                       mod->arch.core_unw_table, core, core + num_core);
        }
        if (num_init > 0) {
                mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
                                                                init, init + num_init);
                DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
                       mod->arch.init_unw_table, init, init + num_init);
        }
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
        DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
        if (mod->arch.unwind)
                register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
        if (mod->arch.paravirt_bundles) {
                struct paravirt_patch_site_bundle *start =
                        (struct paravirt_patch_site_bundle *)
                        mod->arch.paravirt_bundles->sh_addr;
                struct paravirt_patch_site_bundle *end =
                        (struct paravirt_patch_site_bundle *)
                        (mod->arch.paravirt_bundles->sh_addr +
                         mod->arch.paravirt_bundles->sh_size);

                paravirt_patch_apply_bundle(start, end);
        }
        if (mod->arch.paravirt_insts) {
                struct paravirt_patch_site_inst *start =
                        (struct paravirt_patch_site_inst *)
                        mod->arch.paravirt_insts->sh_addr;
                struct paravirt_patch_site_inst *end =
                        (struct paravirt_patch_site_inst *)
                        (mod->arch.paravirt_insts->sh_addr +
                         mod->arch.paravirt_insts->sh_size);

                paravirt_patch_apply_inst(start, end);
        }
#endif
        return 0;
}

void
module_arch_cleanup (struct module *mod)
{
        if (mod->arch.init_unw_table)
                unw_remove_unwind_table(mod->arch.init_unw_table);
        if (mod->arch.core_unw_table)
                unw_remove_unwind_table(mod->arch.core_unw_table);
}