/*  Kernel module help for PPC64.
    Copyright (C) 2001, 2003 Rusty Russell IBM Corporation.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>

#include "setup.h"
/* FIXME: We don't do .init separately.  To do this, we'd need to have
   a separate r2 value in the init and core section, and stub between
   them, too.

   Using a magic allocator which places modules within 32MB solves
   this, and makes other things simpler.  Anton? */

#define DEBUGP(fmt , ...)
/* There's actually a third entry here, but it's unused */
struct ppc64_opd_entry
{
	unsigned long funcaddr;
	unsigned long r2;
};

/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
   the kernel itself).  But on PPC64, these need to be used for every
   jump, actually, to reset r2 (TOC+0x8000). */
struct ppc64_stub_entry
{
	/* 28 byte jump instruction sequence (7 instructions) */
	unsigned char jump[28];
	unsigned char unused[4];

	/* Data for the above code */
	struct ppc64_opd_entry opd;
};
/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
   function which may be more than 24-bits away.  We could simply
   patch the new r2 value and function pointer into the stub, but it's
   significantly shorter to put these values at the end of the stub
   code, and patch the stub address (32-bits relative to the TOC ptr,
   r2) into the stub. */
static struct ppc64_stub_entry ppc64_stub =
{ .jump = {
	0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
	0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
	/* Save current r2 value in magic place on the stack. */
	0xf8, 0x41, 0x00, 0x28, /* std     r2,40(r1) */
	0xe9, 0x6c, 0x00, 0x20, /* ld      r11,32(r12) */
	0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
	0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
	0x4e, 0x80, 0x04, 0x20  /* bctr */
} };
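
/*
 * How the template above works (added explanatory note): addis/addi
 * compute the stub's own address as r2 + (<high> << 16) + <low>, where
 * <high> and <low> are patched in by create_stub() below.  The two ld
 * instructions then fetch opd.funcaddr and opd.r2 from offsets 32 and
 * 40 of the stub itself (28 bytes of code plus 4 unused bytes put the
 * embedded ppc64_opd_entry at offset 32), and mtctr/bctr branches to
 * the target function with its TOC pointer already loaded into r2.
 */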
/* Count how many different 24-bit relocations (different symbol,
   different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, r_info, r_addend, _count_relocs;

	/* FIXME: Only count external ones --RR */
	_count_relocs = 0;
	r_info = 0;
	r_addend = 0;
	for (i = 0; i < num; i++)
		/* Only count 24-bit relocs, others don't need stubs */
		if (ELF64_R_TYPE(rela[i].r_info) == R_PPC_REL24 &&
		    (r_info != ELF64_R_SYM(rela[i].r_info) ||
		     r_addend != rela[i].r_addend)) {
			_count_relocs++;
			r_info = ELF64_R_SYM(rela[i].r_info);
			r_addend = rela[i].r_addend;
		}

	return _count_relocs;
}
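
/*
 * Note (added): count_relocs() only compares each entry against the one
 * just before it, so it relies on the caller having sorted the
 * relocation table first (see the sort() call in get_stubs_size()
 * below).  Sorting makes duplicate (symbol, addend) pairs adjacent,
 * which turns the uniqueness count into a single linear pass.
 */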
void *module_alloc(unsigned long size)
{
	if (size == 0)
		return NULL;

	return vmalloc_exec(size);
}

/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
	   table entries. */
}
static int relacmp(const void *_x, const void *_y)
{
	const Elf64_Rela *x, *y;

	y = (Elf64_Rela *)_x;
	x = (Elf64_Rela *)_y;

	/* Compare the entire r_info (as opposed to ELF64_R_SYM(r_info) only) to
	 * make the comparison cheaper/faster. It won't affect the sorting or
	 * the counting algorithms' performance
	 */
	if (x->r_info < y->r_info)
		return -1;
	else if (x->r_info > y->r_info)
		return 1;
	else if (x->r_addend < y->r_addend)
		return -1;
	else if (x->r_addend > y->r_addend)
		return 1;
	else
		return 0;
}

static void relaswap(void *_x, void *_y, int size)
{
	uint64_t *x, *y, tmp;
	int i;

	y = (uint64_t *)_x;
	x = (uint64_t *)_y;

	for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
		tmp = x[i];
		x[i] = y[i];
		y[i] = tmp;
	}
}

/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
				    const Elf64_Shdr *sechdrs)
{
	/* One extra reloc so it's always 0-funcaddr terminated */
	unsigned long relocs = 1;
	unsigned i;

	/* Every relocated section... */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_RELA) {
			DEBUGP("Found relocations in section %u\n", i);
			DEBUGP("Ptr: %p.  Number: %lu\n",
			       (void *)sechdrs[i].sh_addr,
			       sechdrs[i].sh_size / sizeof(Elf64_Rela));

			/* Sort the relocation information based on a symbol and
			 * addend key. This is a stable O(n*log n) complexity
			 * algorithm but it will reduce the complexity of
			 * count_relocs() to linear complexity O(n)
			 */
			sort((void *)sechdrs[i].sh_addr,
			     sechdrs[i].sh_size / sizeof(Elf64_Rela),
			     sizeof(Elf64_Rela), relacmp, relaswap);

			relocs += count_relocs((void *)sechdrs[i].sh_addr,
					       sechdrs[i].sh_size
					       / sizeof(Elf64_Rela));
		}
	}

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}
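
/*
 * Note (added): the size computed above is a worst case.  The "relocs = 1"
 * starting value reserves one extra stub entry whose funcaddr stays zero,
 * which stub_for_addr() below relies on as the end-of-table marker.
 */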
static void dedotify_versions(struct modversion_info *vers,
			      unsigned long size)
{
	struct modversion_info *end;

	for (end = (void *)vers + size; vers < end; vers++)
		if (vers->name[0] == '.')
			memmove(vers->name, vers->name+1, strlen(vers->name));
}

/* Undefined symbols which refer to .funcname, hack to funcname */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
	unsigned int i;

	for (i = 1; i < numsyms; i++) {
		if (syms[i].st_shndx == SHN_UNDEF) {
			char *name = strtab + syms[i].st_name;
			if (name[0] == '.')
				memmove(name, name+1, strlen(name));
		}
	}
}
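
/*
 * Note (added): with PPC64 function descriptors, compiled code calls the
 * dot-prefixed entry symbol (".funcname") while the descriptor symbol is
 * plain "funcname".  Stripping the leading dot from the module's undefined
 * symbols lets them resolve against the kernel's exported descriptors,
 * whose addresses are then treated as OPD entries by stub_for_addr().
 */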
int module_frob_arch_sections(Elf64_Ehdr *hdr,
			      Elf64_Shdr *sechdrs,
			      char *secstrings,
			      struct module *me)
{
	unsigned int i;

	/* Find .toc and .stubs sections, symtab and strtab */
	for (i = 1; i < hdr->e_shnum; i++) {
		char *p;

		if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
			me->arch.stubs_section = i;
		else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0)
			me->arch.toc_section = i;
		else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
			dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
					  sechdrs[i].sh_size);

		/* We don't handle .init for the moment: rename to _init */
		while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
			p[0] = '_';

		if (sechdrs[i].sh_type == SHT_SYMTAB)
			dedotify((void *)hdr + sechdrs[i].sh_offset,
				 sechdrs[i].sh_size / sizeof(Elf64_Sym),
				 (void *)hdr
				 + sechdrs[sechdrs[i].sh_link].sh_offset);
	}

	if (!me->arch.stubs_section) {
		printk("%s: doesn't contain .stubs.\n", me->name);
		return -ENOEXEC;
	}

	/* If we don't have a .toc, just use .stubs.  We need to set r2
	   to some reasonable value in case the module calls out to
	   other functions via a stub, or if a function pointer escapes
	   the module by some means. */
	if (!me->arch.toc_section)
		me->arch.toc_section = me->arch.stubs_section;

	/* Override the stubs size */
	sechdrs[me->arch.stubs_section].sh_size = get_stubs_size(hdr, sechdrs);
	return 0;
}
int apply_relocate(Elf64_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "%s: Non-ADD RELOCATION unsupported\n", me->name);
	return -ENOEXEC;
}

/* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
   gives the value maximum span in an instruction which uses a signed
   offset) */
static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
{
	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
}

/* Both low and high 16 bits are added as SIGNED additions, so if low
   16 bits has high bit set, high 16 bits must be adjusted.  These
   macros do that (stolen from binutils). */
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
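
/*
 * Worked example (added, values illustrative): the 0x8000 bias in my_r2()
 * means a signed 16-bit offset from r2 reaches TOC bytes 0..0xffff, i.e.
 * the whole 64K TOC.  For reladdr = 0x12348000:
 *   PPC_LO = 0x8000, PPC_HI = 0x1234, PPC_HA = PPC_HI(0x12350000) = 0x1235.
 * addis r12,r2,0x1235 adds 0x12350000, then addi r12,r12,0x8000 adds the
 * sign-extended low half (-0x8000), giving r2 + 0x12348000 as intended.
 */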
/* Patch stub to reference function and correct r2 value. */
static inline int create_stub(Elf64_Shdr *sechdrs,
			      struct ppc64_stub_entry *entry,
			      struct ppc64_opd_entry *opd,
			      struct module *me)
{
	Elf64_Half *loc1, *loc2;
	long reladdr;

	*entry = ppc64_stub;

	loc1 = (Elf64_Half *)&entry->jump[2];
	loc2 = (Elf64_Half *)&entry->jump[6];

	/* Stub uses address relative to r2. */
	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		printk("%s: Address %p of stub out of range of %p.\n",
		       me->name, (void *)reladdr,
		       (void *)my_r2(sechdrs, me));
		return 0;
	}
	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);

	*loc1 = PPC_HA(reladdr);
	*loc2 = PPC_LO(reladdr);
	entry->opd.funcaddr = opd->funcaddr;
	entry->opd.r2 = opd->r2;
	return 1;
}
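
/*
 * Note (added): the range check above exists because addis/addi can only
 * form a 32-bit signed displacement from r2, so every stub must live
 * within +/-2GB of the module's TOC.  create_stub() reports failure by
 * returning 0, which stub_for_addr() passes back to its caller.
 */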
/* Create stub to jump to function described in this OPD: we need the
   stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
				   unsigned long opdaddr,
				   struct module *me)
{
	struct ppc64_stub_entry *stubs;
	struct ppc64_opd_entry *opd = (void *)opdaddr;
	unsigned int i, num_stubs;

	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Find this stub, or if that fails, the next avail. entry */
	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
	for (i = 0; stubs[i].opd.funcaddr; i++) {
		BUG_ON(i >= num_stubs);

		if (stubs[i].opd.funcaddr == opd->funcaddr)
			return (unsigned long)&stubs[i];
	}

	if (!create_stub(sechdrs, &stubs[i], opd, me))
		return 0;

	return (unsigned long)&stubs[i];
}
/* We expect a noop next: if it is, replace it with instruction to
   restore r2. */
static int restore_r2(u32 *instruction, struct module *me)
{
	if (*instruction != 0x60000000) {
		printk("%s: Expect noop after relocate, got %08x\n",
		       me->name, *instruction);
		return 0;
	}
	*instruction = 0xe8410028;	/* ld r2,40(r1) */
	return 1;
}
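
/*
 * Note (added): the ABI requires a call that goes through a TOC-changing
 * stub to be followed by an instruction that reloads the caller's r2, and
 * the compiler leaves a nop (0x60000000) after external calls for exactly
 * this purpose.  The stub saved the old r2 at 40(r1) (the "std r2,40(r1)"
 * in ppc64_stub), so the nop is rewritten to "ld r2,40(r1)" here.
 */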
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	unsigned long *location;
	unsigned long value;

	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rela[i].r_offset;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rela[i].r_info);

		DEBUGP("RELOC at %p: %li-type as %s (%lu) + %li\n",
		       location, (long)ELF64_R_TYPE(rela[i].r_info),
		       strtab + sym->st_name, (unsigned long)sym->st_value,
		       (long)rela[i].r_addend);

		/* `Everything is relative'. */
		value = sym->st_value + rela[i].r_addend;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_PPC64_ADDR32:
			/* Simply set it */
			*(u32 *)location = value;
			break;

		case R_PPC64_ADDR64:
			/* Simply set it */
			*(unsigned long *)location = value;
			break;

		case R_PPC64_TOC:
			*(unsigned long *)location = my_r2(sechdrs, me);
			break;

		case R_PPC64_TOC16:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if (value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16 relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xffff)
				| (value & 0xffff);
			break;

		case R_PPC64_TOC16_DS:
			/* Subtract TOC pointer */
			value -= my_r2(sechdrs, me);
			if ((value & 3) != 0 || value + 0x8000 > 0xffff) {
				printk("%s: bad TOC16_DS relocation (%lu)\n",
				       me->name, value);
				return -ENOEXEC;
			}
			*((uint16_t *) location)
				= (*((uint16_t *) location) & ~0xfffc)
				| (value & 0xfffc);
			break;

		case R_PPC_REL24:
			/* FIXME: Handle weak symbols here --RR */
			if (sym->st_shndx == SHN_UNDEF) {
				/* External: go via stub */
				value = stub_for_addr(sechdrs, value, me);
				if (!value)
					return -ENOENT;
				if (!restore_r2((u32 *)location + 1, me))
					return -ENOEXEC;
			}

			/* Convert value to relative */
			value -= (unsigned long)location;
			if (value + 0x2000000 > 0x3ffffff || (value & 3) != 0) {
				printk("%s: REL24 %li out of range!\n",
				       me->name, (long int)value);
				return -ENOEXEC;
			}

			/* Only replace bits 2 through 26 */
			*(uint32_t *)location
				= (*(uint32_t *)location & ~0x03fffffc)
				| (value & 0x03fffffc);
			break;

		case R_PPC64_REL64:
			/* 64 bits relative (used by features fixups) */
			*location = value - (unsigned long)location;
			break;

		default:
			printk("%s: Unknown ADD relocation: %lu\n",
			       me->name,
			       (unsigned long)ELF64_R_TYPE(rela[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}
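
/*
 * Note (added): for R_PPC_REL24 above, a PowerPC branch instruction holds
 * a 24-bit LI field in bits 6-29 which, shifted left by 2, gives a signed
 * 26-bit word-aligned displacement.  That is why the code masks with
 * 0x03fffffc and why the range check (value + 0x2000000 > 0x3ffffff)
 * limits direct branches to +/-32MB, beyond which a stub is required.
 */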
LIST_HEAD(module_bug_list);

static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	char *secstrings;
	unsigned int i;

	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++)
		if (strcmp(secstrings+sechdrs[i].sh_name, name) == 0)
			return &sechdrs[i];
	return NULL;
}

int module_finalize(const Elf_Ehdr *hdr,
		const Elf_Shdr *sechdrs, struct module *me)
{
	const Elf_Shdr *sect;
	int err;

	err = module_bug_finalize(hdr, sechdrs, me);
	if (err)
		return err;

	/* Apply feature fixups */
	sect = find_section(hdr, sechdrs, "__ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

	sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
	if (sect != NULL)
		do_feature_fixups(powerpc_firmware_features,
				  (void *)sect->sh_addr,
				  (void *)sect->sh_addr + sect->sh_size);

	return 0;
}
void module_arch_cleanup(struct module *mod)
{
	module_bug_cleanup(mod);
}

struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	struct mod_arch_specific *mod;
	unsigned int i;
	struct bug_entry *bug;

	list_for_each_entry(mod, &module_bug_list, bug_list) {
		bug = mod->bug_table;
		for (i = 0; i < mod->num_bugs; ++i, ++bug)
			if (bugaddr == bug->bug_addr)
				return bug;
	}
	return NULL;
}