/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */
/* relocs tested so far:

   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
 */
#include <linux/config.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0	/* Itanium 1 does not implement the brl instruction */
#else
# define USE_BRL	1
#endif

#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */
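/*
 * Note on the 4MB figure: linkage-table entries are addressed gp-relative
 * with a 22-bit signed add immediate, which reaches +/-2MB around gp.  The
 * whole table must therefore fit in a 2^22-byte window.
 */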
/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};
enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes. */
};
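/*
 * Worked example of this decoding: R_IA64_LTOFF22 is 0x32, so
 * format = 0x32 & FORMAT_MASK = 2 (RF_INSN22) and
 * formula = (0x32 >> VALUE_SHIFT) & VALUE_MASK = 6 (RV_LTREL), i.e. patch an
 * imm22 field with @ltoff(S + A).  The three format bits can only encode
 * formats 0-7; relocs that need RF_INSN60/21B/21M/21F (values >= 8) get
 * their format fixed up explicitly in do_reloc() below.
 */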
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

struct got_entry {
	uint64_t val;
};

struct fdesc {
	uint64_t ip;
	uint64_t gp;
};
/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline uint64_t
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, (int) slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}
static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, (int) slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n", mod->name, (int64_t) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n", mod->name, (int64_t)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n", mod->name, (int64_t)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
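/*
 * Note on units: IA-64 IP-relative branch displacements count 16-byte
 * bundles, not bytes.  That is why callers of apply_imm21b()/apply_imm60()
 * pass (target - source)/16, and why the 21-bit field reaches +/-2^20
 * bundles (16MB) from the branch.
 */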
#if USE_BRL

struct plt_entry {
	/* Two 16-byte instruction bundles in the PLT entry. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) <<  0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}
#else /* !USE_BRL */

struct plt_entry {
	/* Three 16-byte instruction bundles in the PLT entry. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
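/*
 * Summary of the two PLT flavors above: with USE_BRL, an entry is two
 * bundles (set gp, then a direct brl.many whose 60-bit bundle displacement
 * reaches anywhere in the address space).  Without brl, three bundles load
 * the target IP into r16, set gp, and branch indirectly through b6.
 */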
void *
module_alloc (unsigned long size)
{
	if (!size)
		return NULL;
	return vmalloc(size);
}
void
module_free (struct module *mod, void *module_region)
{
	if (mod->arch.init_unw_table && module_region == mod->module_init) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	vfree(module_region);
}
/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}
/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_LTOFF22:
		      case R_IA64_LTOFF22X:
		      case R_IA64_LTOFF64I:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_PCREL21B:
		      case R_IA64_PLTOFF22:
		      case R_IA64_PLTOFF64I:
		      case R_IA64_PLTOFF64MSB:
		      case R_IA64_PLTOFF64LSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		      case R_IA64_FPTR64I:
		      case R_IA64_FPTR32LSB:
		      case R_IA64_FPTR32MSB:
		      case R_IA64_FPTR64LSB:
		      case R_IA64_FPTR64MSB:
		      case R_IA64_LTOFF_FPTR22:
		      case R_IA64_LTOFF_FPTR32LSB:
		      case R_IA64_LTOFF_FPTR32MSB:
		      case R_IA64_LTOFF_FPTR64I:
		      case R_IA64_LTOFF_FPTR64LSB:
		      case R_IA64_LTOFF_FPTR64MSB:
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		      case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}
	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}
	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
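/*
 * The single unsigned comparison above is a full range check: if addr lies
 * below the region base, the subtraction wraps around to a huge value, so
 * "addr - base < size" tests both bounds at once.
 */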
/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
		BUG();

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
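/*
 * Same wraparound trick again: the test above is equivalent to
 * gp - MAX_LTOFF/2 <= value < gp + MAX_LTOFF/2, i.e. VALUE falls inside the
 * 2MB-either-side window that a gp-relative imm22 can reach.
 */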
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __FUNCTION__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create a new one. */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
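/*
 * Background: in the IA-64 software conventions, a function pointer does not
 * point at code; it points at a descriptor like struct fdesc above, holding
 * the entry IP and the callee's gp.  get_fdesc() materializes such a
 * descriptor in .opd for module-local entry points; anything else is assumed
 * to point at a descriptor already.
 */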
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	      case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	      case RV_DIRECT:
		break;

	      case RV_GPREL:	  val -= mod->arch.gp; break;
	      case RV_LTREL:	  val = get_ltoff(mod, val, &ok); break;
	      case RV_PLTREL:	  val = get_plt(mod, location, val, &ok); break;
	      case RV_FPTR:	  val = get_fdesc(mod, val, &ok); break;
	      case RV_SECREL:	  val -= sec->sh_addr; break;
	      case RV_LTREL_FPTR: val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;
	      case RV_PCREL:
		switch (r_type) {
		      case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core;
				 * if the branch won't reach, allocate a PLT entry for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		      default:
			val -= bundle(location);
			break;

		      case R_IA64_PCREL32MSB:
		      case R_IA64_PCREL32LSB:
		      case R_IA64_PCREL64MSB:
		      case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;
		}
		switch (r_type) {
		      case R_IA64_PCREL60B: format = RF_INSN60; break;
		      case R_IA64_PCREL21B: format = RF_INSN21B; break;
		      case R_IA64_PCREL21M: format = RF_INSN21M; break;
		      case R_IA64_PCREL21F: format = RF_INSN21F; break;
		      default: break;
		}
		break;
	      case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;

	      case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	      case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n",
				       __FUNCTION__, reloc_name[r_type], val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;
	      case RV_SPECIAL:
		switch (r_type) {
		      case R_IA64_IPLTMSB:
		      case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		      case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		      case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		      case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		      default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;
	      case RV_TPREL:
	      case RV_LTREL_TPREL:
	      case RV_DTPMOD:
	      case RV_LTREL_DTPMOD:
	      case RV_DTPREL:
	      case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}
	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	      case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	      case RF_INSN22:	ok = apply_imm22(mod, location, val); break;
	      case RF_INSN64:	ok = apply_imm64(mod, location, val); break;
	      case RF_INSN60:	ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	      case RF_32LSB:	put_unaligned(val, (uint32_t *) location); break;
	      case RF_64LSB:	put_unaligned(val, (uint64_t *) location); break;
	      case RF_32MSB:	/* ia64 Linux is little-endian... */
	      case RF_64MSB:	/* ia64 Linux is little-endian... */
	      case RF_INSN14:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	      case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	      default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;
	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;

		if (mod->core_size > MAX_LTOFF)
			/*
			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
			 * at the end of the module.
			 */
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp);
	}
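/*
 * Worked example for the gp placement above: a 1MB module gets gp at
 * module_core + 0x80000, so the whole image sits inside the +/-2MB
 * gp-relative window.  A module larger than MAX_LTOFF instead gets gp
 * MAX_LTOFF/2 before its end, keeping the tail of the image (where the
 * small-data sections are laid out) gp-addressable.
 */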
	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
int
apply_relocate (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		unsigned int relsec, struct module *mod)
{
	printk(KERN_ERR "module %s: REL relocs in section %u unsupported\n", mod->name, relsec);
	return -ENOEXEC;
}
/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__,
	       mod->name, mod->arch.gp, num_init, num_core);
	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __FUNCTION__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __FUNCTION__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
	return 0;
}
void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}
#ifdef CONFIG_SMP
void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		memcpy(pcpudst + __per_cpu_offset[i], src, size);
	}
}
#endif /* CONFIG_SMP */