/*
 * Process the non-PLT relocations (MIPS flavor).
 *
 * MIPS has no REL/RELA entries for most GOT slots; instead the GOT itself
 * is split into "local" entries (rebased by relocbase) and "global" entries
 * (one per symbol starting at obj->gotsym), which are fixed up here before
 * the ordinary Elf_Rel table is walked.
 *
 * Returns 0 on success, -1 on an unresolvable symbol or unsupported
 * relocation type.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rel;
	const Elf_Rel *rellim;
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *sym, *def;
	const Obj_Entry *defobj;
	Elf_Word i;
#ifdef SUPPORT_OLD_BROKEN_LD
	int broken;
#endif

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

#ifdef SUPPORT_OLD_BROKEN_LD
	/*
	 * Heuristic: old broken GNU ld emitted LOCAL/NOTYPE entries among
	 * the first few symbols; detect that so we can compensate below.
	 */
	broken = 0;
	sym = obj->symtab;
	for (i = 1; i < 12; i++)
		if (sym[i].st_info == ELF_ST_INFO(STB_LOCAL, STT_NOTYPE))
			broken = 1;
	dbg("%s: broken=%d", obj->path, broken);
#endif

	/* got[0] is reserved; got[1] may hold an object pointer (GOT1_MASK). */
	i = (got[1] & GOT1_MASK) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	dbg("got:%p for %d entries adding %p",
	    got, obj->local_gotno, obj->relocbase);
	for (; i < obj->local_gotno; i++) {
		*got += (Elf_Addr)obj->relocbase;
		got++;
	}
	sym = obj->symtab + obj->gotsym;

	dbg("got:%p for %d entries", got, obj->symtabno);
	/* Now do the global GOT entries */
	for (i = obj->gotsym; i < obj->symtabno; i++) {
		dbg(" doing got %d sym %p (%s, %lx)", i - obj->gotsym, sym,
		    sym->st_name + obj->strtab, (u_long) *got);

#ifdef SUPPORT_OLD_BROKEN_LD
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    broken && sym->st_shndx == SHN_UNDEF) {
			/*
			 * XXX DANGER WILL ROBINSON!
			 * You might think this is stupid, as it intentionally
			 * defeats lazy binding -- and you'd be right.
			 * Unfortunately, for lazy binding to work right, we
			 * need to a way to force the GOT slots used for
			 * function pointers to be resolved immediately.  This
			 * is supposed to be done automatically by the linker,
			 * by not outputting a PLT slot and setting st_value
			 * to 0 if there are non-PLT references, but older
			 * versions of GNU ld do not do this.
			 */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		} else
#endif
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    sym->st_value != 0 && sym->st_shndx == SHN_UNDEF) {
			/*
			 * If there are non-PLT references to the function,
			 * st_value should be 0, forcing us to resolve the
			 * address immediately.
			 *
			 * XXX DANGER WILL ROBINSON!
			 * The linker is not outputting PLT slots for calls to
			 * functions that are defined in the same shared
			 * library.  This is a bug, because it can screw up
			 * link ordering rules if the symbol is defined in
			 * more than one module.  For now, if there is a
			 * definition, we fail the test above and force a full
			 * symbol lookup.  This means that all intra-module
			 * calls are bound immediately.  - mycroft, 2003/09/24
			 */
			*got = sym->st_value + (Elf_Addr)obj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning2, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
			}
		} else if (sym->st_info == ELF_ST_INFO(STB_GLOBAL, STT_SECTION)) {
			/* Symbols with index SHN_ABS are not relocated. */
			if (sym->st_shndx != SHN_ABS) {
				*got = sym->st_value +
				    (Elf_Addr)obj->relocbase;
				if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
					dbg("Warning3, i:%d maps to relocbase address:%p",
					    i, obj->relocbase);
				}
			}
		} else {
			/* TODO: add cache here */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL) {
				dbg("Warning4, can't find symbol %d", i);
				return -1;
			}
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning4, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
				dbg("via first obj symbol %s",
				    obj->strtab + obj->symtab[i].st_name);
				dbg("found in obj %p:%s",
				    defobj, defobj->path);
			}
		}
		dbg(" --> now %lx", (u_long) *got);
		++sym;
		++got;
	}

	/* Second pass: walk the ordinary Elf_Rel relocation table. */
	got = obj->pltgot;
	rellim = (const Elf_Rel *)((caddr_t)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		Elf_Word r_symndx, r_type;
		void *where;

		where = obj->relocbase + rel->r_offset;
		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		/* n64 packs up to three relocation types per entry. */
		switch (r_type & 0xff) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(REL32): {
			/* 32-bit PC-relative reference */
			const size_t rlen = ELF_R_NXTTYPE_64_P(r_type) ?
			    sizeof(Elf_Sxword) : sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;

			def = obj->symtab + r_symndx;

			if (r_symndx >= obj->gotsym) {
				/* Global symbol: value comes from its GOT slot. */
				val += got[obj->local_gotno + r_symndx -
				    obj->gotsym];
				dbg("REL32/G(%p) %p --> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			} else {
				/*
				 * XXX: ABI DIFFERENCE!
				 *
				 * Old NetBSD binutils would generate shared
				 * libs with section-relative relocations being
				 * already adjusted for the start address of
				 * the section.
				 *
				 * New binutils, OTOH, generate shared libs
				 * with the same relocations being based at
				 * zero, so we need to add in the start address
				 * of the section.
				 *
				 * --rkb, Oct 6, 2001
				 */
				if (def->st_info ==
				    ELF_ST_INFO(STB_LOCAL, STT_SECTION)
#ifdef SUPPORT_OLD_BROKEN_LD
				    && !broken
#endif
				    )
					val += (Elf_Addr)def->st_value;

				val += (Elf_Addr)obj->relocbase;

				dbg("REL32/L(%p) %p -> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			}
			store_ptr(where, val, rlen);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPMOD64):
#else
		case R_TYPE(TLS_DTPMOD32):
#endif
		{
			/* Module ID for the defining object's TLS block. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			val += (Elf_Addr)defobj->tlsindex;

			store_ptr(where, val, rlen);
			dbg("DTPMOD %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void*)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPREL64):
#else
		case R_TYPE(TLS_DTPREL32):
#endif
		{
			/* Offset of the symbol within its module's TLS block. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			/*
			 * BUGFIX: allocate_tls_offset() returns true on
			 * success (see the other architectures' handling of
			 * the same call), so failure is the negated result.
			 * The previous code returned -1 on success and
			 * carried on after a failed allocation.
			 *
			 * NOTE(review): tls_done is tested on defobj but the
			 * allocation is done on obj, matching the original
			 * code — confirm whether defobj was intended.
			 */
			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)def->st_value - TLS_DTP_OFFSET;
			store_ptr(where, val, rlen);

			dbg("DTPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_TPREL64):
#else
		case R_TYPE(TLS_TPREL32):
#endif
		{
			/* Static-TLS offset relative to the thread pointer. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			/* BUGFIX: same inverted success test as DTPREL above. */
			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)(def->st_value + defobj->tlsoffset -
			    TLS_TP_OFFSET - TLS_TCB_SIZE);
			store_ptr(where, val, rlen);

			dbg("TPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

		default:
			dbg("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    (u_long)r_symndx, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset,
			    (void *)load_ptr(where, sizeof(Elf_Sword)),
			    obj->strtab + obj->symtab[r_symndx].st_name);
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}

	return 0;
}
/*
 * Process the non-PLT relocations (i386 flavor).
 *
 * Walks obj's Elf_Rel table in two phases per entry: first resolve the
 * symbol (if the relocation type references one), then apply the fixup.
 * When SYMLOOK_IFUNC is set, only STT_GNU_IFUNC relocations are processed;
 * otherwise IFUNC relocations are deferred (obj->non_plt_gnu_ifunc).
 *
 * Returns 0 on success, -1 on failure (symbol lookup, TLS exhaustion, or
 * an unsupported relocation type).
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr *where, symval, add;
	int r;

	r = -1;
	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */
	} else
		cache = NULL;
	rellim = (const Elf_Rel *)((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		/*
		 * Phase one: resolve the symbol for relocation types that
		 * reference one, and compute symval.  IFUNC symbols are
		 * either deferred or resolved now depending on flags.
		 */
		switch (ELF_R_TYPE(rel->r_info)) {
		case R_386_32:
		case R_386_PC32:
		case R_386_GLOB_DAT:
		case R_386_TLS_TPOFF:
		case R_386_TLS_TPOFF32:
		case R_386_TLS_DTPMOD32:
		case R_386_TLS_DTPOFF32:
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				goto done;
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rel->r_info)) {
				case R_386_32:
				case R_386_PC32:
				case R_386_GLOB_DAT:
					/* Defer IFUNC to a later pass. */
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				case R_386_TLS_TPOFF:
				case R_386_TLS_TPOFF32:
				case R_386_TLS_DTPMOD32:
				case R_386_TLS_DTPOFF32:
					/* IFUNC makes no sense for TLS. */
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					goto done;
				}
			} else {
				/* IFUNC-only pass: skip regular symbols. */
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			/* IFUNC-only pass: skip symbol-less relocations. */
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
			break;
		}
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);

		/* Phase two: apply the relocation at 'where'. */
		switch (ELF_R_TYPE(rel->r_info)) {
		case R_386_NONE:
			break;
		case R_386_32:
			*where += symval;
			break;
		case R_386_PC32:
			/*
			 * I don't think the dynamic linker should ever
			 * see this type of relocation.  But the
			 * binutils-2.6 tools sometimes generate it.
			 */
			*where += symval - (Elf_Addr)where;
			break;
		case R_386_COPY:
			/*
			 * These are deferred until all other
			 * relocations have been done.  All we do here
			 * is make sure that the COPY relocation is
			 * not in a shared library.  They are allowed
			 * only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_386_COPY "
				    "relocation in shared library", obj->path);
				goto done;
			}
			break;
		case R_386_GLOB_DAT:
			*where = symval;
			break;
		case R_386_RELATIVE:
			*where += (Elf_Addr)obj->relocbase;
			break;
		case R_386_TLS_TPOFF:
		case R_386_TLS_TPOFF32:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block.  This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules.  If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			/* TPOFF is negative of TPOFF32 (addition vs. subtraction). */
			add = (Elf_Addr)(def->st_value - defobj->tlsoffset);
			if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF)
				*where += add;
			else
				*where -= add;
			break;
		case R_386_TLS_DTPMOD32:
			/* TLS module ID of the defining object. */
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_386_TLS_DTPOFF32:
			/* Offset of the symbol within its TLS block. */
			*where += (Elf_Addr) def->st_value;
			break;
		default:
			_rtld_error("%s: Unsupported relocation type %d"
			    " in non-PLT relocations\n", obj->path,
			    ELF_R_TYPE(rel->r_info));
			goto done;
		}
	}
	r = 0;
done:
	/* free(NULL) is a no-op for the rtld-itself case. */
	free(cache);
	return (r);
}
/*
 * Process the non-PLT relocations (x86-64 flavor).
 *
 * Walks obj's Elf_Rela table in two phases per entry: first resolve the
 * symbol (for relocation types that reference one), then apply the fixup,
 * honoring the explicit addend.  When SYMLOOK_IFUNC is set, only
 * STT_GNU_IFUNC relocations are processed; otherwise IFUNC relocations are
 * deferred (obj->non_plt_gnu_ifunc).
 *
 * Returns 0 on success, -1 on failure (symbol lookup, TLS exhaustion, or
 * an unsupported relocation type).
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr *where, symval;
	Elf32_Addr *where32;
	int r;

	r = -1;
	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */
	} else
		cache = NULL;
	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
	for (rela = obj->rela; rela < relalim; rela++) {
		/*
		 * First, resolve symbol for relocations which
		 * reference symbols.
		 */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_X86_64_64:
		case R_X86_64_PC32:
		case R_X86_64_GLOB_DAT:
		case R_X86_64_TPOFF64:
		case R_X86_64_TPOFF32:
		case R_X86_64_DTPMOD64:
		case R_X86_64_DTPOFF64:
		case R_X86_64_DTPOFF32:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				goto done;
			/*
			 * If symbol is IFUNC, only perform relocation
			 * when caller allowed it by passing
			 * SYMLOOK_IFUNC flag.  Skip the relocations
			 * otherwise.
			 *
			 * Also error out in case IFUNC relocations
			 * are specified for TLS, which cannot be
			 * usefully interpreted.
			 */
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rela->r_info)) {
				case R_X86_64_64:
				case R_X86_64_PC32:
				case R_X86_64_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				case R_X86_64_TPOFF64:
				case R_X86_64_TPOFF32:
				case R_X86_64_DTPMOD64:
				case R_X86_64_DTPOFF64:
				case R_X86_64_DTPOFF32:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					goto done;
				}
			} else {
				/* IFUNC-only pass: skip regular symbols. */
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			/* IFUNC-only pass: skip symbol-less relocations. */
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
			break;
		}
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		where32 = (Elf32_Addr *)where;

		/* Second, apply the relocation at 'where'. */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*where = symval + rela->r_addend;
			break;
		case R_X86_64_PC32:
			/*
			 * I don't think the dynamic linker should
			 * ever see this type of relocation.  But the
			 * binutils-2.6 tools sometimes generate it.
			 */
			*where32 = (Elf32_Addr)(unsigned long)(symval +
			    rela->r_addend - (Elf_Addr)where);
			break;
		/* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
		case R_X86_64_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done.  All we do here is make sure that the COPY
			 * relocation is not in a shared library.  They are
			 * allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_X86_64_COPY "
				    "relocation in shared library", obj->path);
				goto done;
			}
			break;
		case R_X86_64_GLOB_DAT:
			*where = symval;
			break;
		case R_X86_64_TPOFF64:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block.  This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules.  If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			*where = (Elf_Addr)(def->st_value -
			    defobj->tlsoffset + rela->r_addend);
			break;
		case R_X86_64_TPOFF32:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block.  This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules.  If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			/* 32-bit store for the small-memory-model variant. */
			*where32 = (Elf32_Addr)(def->st_value -
			    defobj->tlsoffset + rela->r_addend);
			break;
		case R_X86_64_DTPMOD64:
			/* TLS module ID of the defining object. */
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_X86_64_DTPOFF64:
			/* Offset of the symbol within its TLS block. */
			*where += (Elf_Addr)(def->st_value + rela->r_addend);
			break;
		case R_X86_64_DTPOFF32:
			*where32 += (Elf32_Addr)(def->st_value +
			    rela->r_addend);
			break;
		case R_X86_64_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		/*
		 * missing:
		 * R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16,
		 * R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8
		 */
		default:
			_rtld_error("%s: Unsupported relocation type %u"
			    " in non-PLT relocations\n", obj->path,
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			goto done;
		}
	}
	r = 0;
done:
	/* free(NULL) is a no-op for the rtld-itself case. */
	free(cache);
	return (r);
}
/*
 * Relocate a non-PLT object with addend (IA-64 flavor).
 *
 * Applies a single Elf_Rela entry for obj.  Function-pointer relocations
 * (FPTR64LSB) are canonicalized through a per-object @fptr table so that
 * all references to one function compare equal.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	struct fptr **fptrs;
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA_64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S
		 */
		if (obj != obj_rtld)
			store64(where,
			    load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA_64_DIR64LSB: {
		/* Direct 64-bit address of the symbol plus addend. */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA_64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = 0;
		Elf_Addr target, gp;
		int sym_index;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, cache, lockstate);
		if (def == NULL) {
			/*
			 * XXX r_debug_state is problematic and find_symdef()
			 * returns NULL for it.  This probably has something to
			 * do with symbol versioning (r_debug_state is in the
			 * symbol map).  If we return -1 in that case we abort
			 * relocating rtld, which typically is fatal.  So, for
			 * now just skip the symbol when we're relocating
			 * rtld.  We don't care about r_debug_state unless we
			 * are being debugged.
			 */
			if (obj != obj_rtld)
				return -1;
			break;
		}

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/* rtld is allowed to reference itself only */
			assert(!obj->rtld || obj == defobj);
			/* Lazily create the defining object's @fptr table. */
			fptrs = defobj->priv;
			if (fptrs == NULL)
				fptrs = alloc_fptrs((Obj_Entry *) defobj,
				    obj->rtld);

			sym_index = def - defobj->symtab;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[sym_index];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[sym_index] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}

	case R_IA_64_IPLTLSB: {
		/*
		 * Relocation typically used to populate C++ virtual function
		 * tables.  It creates a 128-bit function descriptor at the
		 * specified memory address.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;
		} else {
			target = 0;
			gp = 0;
		}

		/* Descriptor is built in place at the relocated address. */
		fptr = (void*)where;
		store64(&fptr->target, target);
		store64(&fptr->gp, gp);
		break;
	}

	case R_IA_64_DTPMOD64LSB: {
		/* TLS module ID of the defining object. */
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, defobj->tlsindex);
		break;
	}

	case R_IA_64_DTPREL64LSB: {
		/* Offset of the symbol within its module's TLS block. */
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_TPREL64LSB: {
		/* Static-TLS offset relative to the thread pointer. */
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block.  This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules.  If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return -1;
			}
		}

		store64(where, defobj->tlsoffset + def->st_value +
		    rela->r_addend);
		break;
	}

	case R_IA_64_NONE:
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %u"
		    " in non-PLT relocations\n", obj->path,
		    (unsigned int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return(0);
}