/* Process one elf relocation with addend. */ static int elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, int local, elf_lookup_fn lookup) { Elf32_Addr *where = (Elf32_Addr *)NULL; Elf_Addr addr; Elf_Addr addend = (Elf_Addr)0; Elf_Word rtype = (Elf_Word)0, symidx; const Elf_Rel *rel = NULL; const Elf_Rela *rela = NULL; int error; /* * Stash R_MIPS_HI16 info so we can use it when processing R_MIPS_LO16 */ static Elf_Addr ahl; static Elf32_Addr *where_hi16; switch (type) { case ELF_RELOC_REL: rel = (const Elf_Rel *)data; where = (Elf32_Addr *) (relocbase + rel->r_offset); rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); switch (rtype) { case R_MIPS_64: addend = *(Elf64_Addr *)where; break; default: addend = *where; break; } break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf32_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("unknown reloc type %d\n", type); } switch (rtype) { case R_MIPS_NONE: /* none */ break; case R_MIPS_32: /* S + A */ error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); addr += addend; if (*where != addr) *where = (Elf32_Addr)addr; break; case R_MIPS_26: /* ((A << 2) | (P & 0xf0000000) + S) >> 2 */ error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); addend &= 0x03ffffff; /* * Addendum for .rela R_MIPS_26 is not shifted right */ if (rela == NULL) addend <<= 2; addr += ((Elf_Addr)where & 0xf0000000) | addend; addr >>= 2; *where &= ~0x03ffffff; *where |= addr & 0x03ffffff; break; case R_MIPS_64: /* S + A */ error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); addr += addend; if (*(Elf64_Addr*)where != addr) *(Elf64_Addr*)where = addr; break; case R_MIPS_HI16: /* ((AHL + S) - ((short)(AHL + S)) >> 16 */ if (rela != NULL) { error = lookup(lf, symidx, 1, &addr); if (error != 0) return (-1); addr += addend; *where &= 0xffff0000; *where 
|= ((((long long) addr + 0x8000LL) >> 16) & 0xffff); } else {
/*
 * Apply a single intra-module relocation to the data.  `relbase' is the
 * target relocation base for the section (i.e. it corresponds to where
 * r_offset == 0).  `dataaddr' is the relocated address corresponding to
 * the start of the data, and `len' is the number of bytes.
 *
 * Returns 0 on success (including relocations that fall outside the
 * buffer, which are silently skipped), EINVAL for an unknown reloc
 * container type, ESRCH when a needed symbol cannot be resolved, and
 * EFTYPE for an unhandled relocation type.
 *
 * Fix: the amd64 and i386 ELF_RELOC_REL cases each assigned `addend = 0'
 * twice (once before and once after extracting rtype/symidx); the
 * redundant second assignment has been removed.
 */
int
__elfN(reloc)(struct elf_file *ef, symaddr_fn *symaddr, const void *reldata,
    int reltype, Elf_Addr relbase, Elf_Addr dataaddr, void *data, size_t len)
{
#ifdef __sparc__
	Elf_Size w;
	const Elf_Rela *a;

	switch (reltype) {
	case ELF_RELOC_RELA:
		a = reldata;
		/* Only patch words that fall inside the caller's buffer. */
		if (relbase + a->r_offset >= dataaddr &&
		    relbase + a->r_offset < dataaddr + len) {
			switch (ELF_R_TYPE(a->r_info)) {
			case R_SPARC_RELATIVE:
				w = relbase + a->r_addend;
				bcopy(&w, (u_char *)data +
				    (relbase + a->r_offset - dataaddr),
				    sizeof(w));
				break;
			default:
				printf("\nunhandled relocation type %u\n",
				    (u_int)ELF_R_TYPE(a->r_info));
				return (EFTYPE);
			}
		}
		break;
	}
	return (0);
#elif (defined(__i386__) || defined(__amd64__)) && __ELF_WORD_SIZE == 64
	Elf64_Addr *where, val;
	Elf_Addr addend, addr;
	Elf_Size rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;

	switch (reltype) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)reldata;
		where = (Elf_Addr *)((char *)data + relbase + rel->r_offset -
		    dataaddr);
		/* REL entries carry the addend in the word being patched. */
		addend = 0;
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)reldata;
		where = (Elf_Addr *)((char *)data + relbase + rela->r_offset -
		    dataaddr);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		return (EINVAL);
	}

	/* Skip relocations whose target is outside the caller's buffer. */
	if ((char *)where < (char *)data || (char *)where >= (char *)data + len)
		return (0);

	/* Only read the in-place addend once `where' is known to be safe. */
	if (reltype == ELF_RELOC_REL)
		addend = *where;

/* XXX, definitions not available on i386. */
#define R_X86_64_64		1
#define R_X86_64_RELATIVE	8

	switch (rtype) {
	case R_X86_64_64:		/* S + A */
		addr = symaddr(ef, symidx);
		if (addr == 0)
			return (ESRCH);
		val = addr + addend;
		*where = val;
		break;
	case R_X86_64_RELATIVE:		/* B + A */
		addr = (Elf_Addr)addend + relbase;
		val = addr;
		*where = val;
		break;
	default:
		printf("\nunhandled relocation type %u\n", (u_int)rtype);
		return (EFTYPE);
	}
	return (0);
#elif defined(__i386__) && __ELF_WORD_SIZE == 32
	Elf_Addr addend, addr, *where, val;
	Elf_Size rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;

	switch (reltype) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)reldata;
		where = (Elf_Addr *)((char *)data + relbase + rel->r_offset -
		    dataaddr);
		/* REL entries carry the addend in the word being patched. */
		addend = 0;
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)reldata;
		where = (Elf_Addr *)((char *)data + relbase + rela->r_offset -
		    dataaddr);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		return (EINVAL);
	}

	/* Skip relocations whose target is outside the caller's buffer. */
	if ((char *)where < (char *)data || (char *)where >= (char *)data + len)
		return (0);

	/* Only read the in-place addend once `where' is known to be safe. */
	if (reltype == ELF_RELOC_REL)
		addend = *where;

/* XXX, definitions not available on amd64. */
#define R_386_32	1	/* Add symbol value. */
#define R_386_GLOB_DAT	6	/* Set GOT entry to data address. */
#define R_386_RELATIVE	8	/* Add load address of shared object. */

	switch (rtype) {
	case R_386_RELATIVE:		/* B + A */
		addr = addend + relbase;
		*where = addr;
		break;
	case R_386_32:			/* S + A */
		addr = symaddr(ef, symidx);
		if (addr == 0)
			return (ESRCH);
		val = addr + addend;
		*where = val;
		break;
	default:
		printf("\nunhandled relocation type %u\n", (u_int)rtype);
		return (EFTYPE);
	}
	return (0);
#elif defined(__powerpc__)
	Elf_Size w;
	const Elf_Rela *rela;

	switch (reltype) {
	case ELF_RELOC_RELA:
		rela = reldata;
		/* Only patch words that fall inside the caller's buffer. */
		if (relbase + rela->r_offset >= dataaddr &&
		    relbase + rela->r_offset < dataaddr + len) {
			switch (ELF_R_TYPE(rela->r_info)) {
			case R_PPC_RELATIVE:
				w = relbase + rela->r_addend;
				bcopy(&w, (u_char *)data +
				    (relbase + rela->r_offset - dataaddr),
				    sizeof(w));
				break;
			default:
				printf("\nunhandled relocation type %u\n",
				    (u_int)ELF_R_TYPE(rela->r_info));
				return (EFTYPE);
			}
		}
		break;
	}
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}
/**
  Copy the ELF sections selected by FilterType into the COFF image and
  apply their RELA relocations.

  Section contents are copied (SHT_PROGBITS) or zeroed (SHT_NOBITS) at the
  COFF offsets previously computed into mCoffSectionsOffset[].  Relocation
  values are then rebased from ELF virtual addresses to COFF section
  offsets.

  Fix: the symbol section-index validity check used `> mEhdr->e_shnum',
  which wrongly accepted the invalid index equal to e_shnum (valid section
  indices are 0 .. e_shnum-1); it is now `>='.

  @param FilterType  Which class of sections to process (text/data/HII).

  @retval TRUE   Sections written and relocated.
  @retval FALSE  Unknown FilterType.
**/
STATIC
BOOLEAN
WriteSections64 (
  SECTION_FILTER_TYPES  FilterType
  )
{
  UINT32      Idx;
  Elf_Shdr    *SecShdr;
  UINT32      SecOffset;
  BOOLEAN     (*Filter)(Elf_Shdr *);

  //
  // Initialize filter pointer
  //
  switch (FilterType) {
    case SECTION_TEXT:
      Filter = IsTextShdr;
      break;
    case SECTION_HII:
      Filter = IsHiiRsrcShdr;
      break;
    case SECTION_DATA:
      Filter = IsDataShdr;
      break;
    default:
      return FALSE;
  }

  //
  // First: copy sections.
  //
  for (Idx = 0; Idx < mEhdr->e_shnum; Idx++) {
    Elf_Shdr *Shdr = GetShdrByIndex(Idx);
    if ((*Filter)(Shdr)) {
      switch (Shdr->sh_type) {
      case SHT_PROGBITS:
        /* Copy. */
        memcpy(mCoffFile + mCoffSectionsOffset[Idx],
               (UINT8*)mEhdr + Shdr->sh_offset,
               (size_t) Shdr->sh_size);
        break;

      case SHT_NOBITS:
        memset(mCoffFile + mCoffSectionsOffset[Idx], 0,
               (size_t) Shdr->sh_size);
        break;

      default:
        //
        // Ignore for unknown section type.
        //
        VerboseMsg ("%s unknown section type %x. We directly copy this section into Coff file", mInImageName, (unsigned)Shdr->sh_type);
        break;
      }
    }
  }

  //
  // Second: apply relocations.
  //
  VerboseMsg ("Applying Relocations...");
  for (Idx = 0; Idx < mEhdr->e_shnum; Idx++) {
    //
    // Determine if this is a relocation section.
    //
    Elf_Shdr *RelShdr = GetShdrByIndex(Idx);
    if ((RelShdr->sh_type != SHT_REL) && (RelShdr->sh_type != SHT_RELA)) {
      continue;
    }

    //
    // Relocation section found.  Now extract section information that the relocations
    // apply to in the ELF data and the new COFF data.
    //
    SecShdr = GetShdrByIndex(RelShdr->sh_info);
    SecOffset = mCoffSectionsOffset[RelShdr->sh_info];

    //
    // Only process relocations for the current filter type.
    //
    if (RelShdr->sh_type == SHT_RELA && (*Filter)(SecShdr)) {
      UINT64 RelIdx;

      //
      // Determine the symbol table referenced by the relocation data.
      //
      Elf_Shdr *SymtabShdr = GetShdrByIndex(RelShdr->sh_link);
      UINT8 *Symtab = (UINT8*)mEhdr + SymtabShdr->sh_offset;

      //
      // Process all relocation entries for this section.
      //
      for (RelIdx = 0; RelIdx < RelShdr->sh_size; RelIdx += (UINT32) RelShdr->sh_entsize) {
        //
        // Set pointer to relocation entry
        //
        Elf_Rela *Rel = (Elf_Rela *)((UINT8*)mEhdr + RelShdr->sh_offset + RelIdx);

        //
        // Set pointer to symbol table entry associated with the relocation entry.
        //
        Elf_Sym *Sym = (Elf_Sym *)(Symtab + ELF_R_SYM(Rel->r_info) * SymtabShdr->sh_entsize);
        Elf_Shdr *SymShdr;
        UINT8 *Targ;

        //
        // Check section header index found in symbol table and get the section
        // header location.  Valid indices are strictly less than e_shnum.
        //
        if (Sym->st_shndx == SHN_UNDEF
            || Sym->st_shndx == SHN_ABS
            || Sym->st_shndx >= mEhdr->e_shnum) {
          Error (NULL, 0, 3000, "Invalid", "%s bad symbol definition.", mInImageName);
        }
        SymShdr = GetShdrByIndex(Sym->st_shndx);

        //
        // Convert the relocation data to a pointer into the coff file.
        //
        // Note:
        //   r_offset is the virtual address of the storage unit to be relocated.
        //   sh_addr is the virtual address for the base of the section.
        //
        //   r_offset in a memory address.
        //   Convert it to a pointer in the coff file.
        //
        Targ = mCoffFile + SecOffset + (Rel->r_offset - SecShdr->sh_addr);

        //
        // Determine how to handle each relocation type based on the machine type.
        //
        if (mEhdr->e_machine == EM_X86_64) {
          switch (ELF_R_TYPE(Rel->r_info)) {
          case R_X86_64_NONE:
            break;
          case R_X86_64_64:
            //
            // Absolute relocation.
            //
            VerboseMsg ("R_X86_64_64");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%016LX",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT64 *)Targ);
            *(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];
            VerboseMsg ("Relocation: 0x%016LX", *(UINT64*)Targ);
            break;
          case R_X86_64_32:
            VerboseMsg ("R_X86_64_32");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(UINT32 *)Targ = (UINT32)((UINT64)(*(UINT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);
            VerboseMsg ("Relocation: 0x%08X", *(UINT32*)Targ);
            break;
          case R_X86_64_32S:
            VerboseMsg ("R_X86_64_32S");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(INT32 *)Targ = (INT32)((INT64)(*(INT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);
            VerboseMsg ("Relocation: 0x%08X", *(UINT32*)Targ);
            break;
          case R_X86_64_PC32:
            //
            // Relative relocation: Symbol - Ip + Addend
            //
            VerboseMsg ("R_X86_64_PC32");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(UINT32 *)Targ = (UINT32) (*(UINT32 *)Targ
              + (mCoffSectionsOffset[Sym->st_shndx] - SymShdr->sh_addr)
              - (SecOffset - SecShdr->sh_addr));
            VerboseMsg ("Relocation: 0x%08X", *(UINT32 *)Targ);
            break;
          default:
            Error (NULL, 0, 3000, "Invalid", "%s unsupported ELF EM_X86_64 relocation 0x%x.", mInImageName, (unsigned) ELF_R_TYPE(Rel->r_info));
          }
        } else if (mEhdr->e_machine == EM_AARCH64) {
          switch (ELF_R_TYPE(Rel->r_info)) {
          case R_AARCH64_ADR_PREL_PG_HI21:
          case R_AARCH64_ADD_ABS_LO12_NC:
          case R_AARCH64_LDST8_ABS_LO12_NC:
          case R_AARCH64_LDST16_ABS_LO12_NC:
          case R_AARCH64_LDST32_ABS_LO12_NC:
          case R_AARCH64_LDST64_ABS_LO12_NC:
          case R_AARCH64_LDST128_ABS_LO12_NC:
            //
            // AArch64 PG_H21 relocations are typically paired with ABS_LO12
            // relocations, where a PC-relative reference with +/- 4 GB range is
            // split into a relative high part and an absolute low part. Since
            // the absolute low part represents the offset into a 4 KB page, we
            // have to make sure that the 4 KB relative offsets of both the
            // section containing the reference as well as the section to which
            // it refers have not been changed during PE/COFF conversion (i.e.,
            // in ScanSections64() above).
            //
            if (((SecShdr->sh_addr ^ SecOffset) & 0xfff) != 0 ||
                ((SymShdr->sh_addr ^ mCoffSectionsOffset[Sym->st_shndx]) & 0xfff) != 0 ||
                mCoffAlignment < 0x1000) {
              Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s AARCH64 small code model requires 4 KB section alignment.", mInImageName);
              break;
            }
            /* fall through */

          case R_AARCH64_ADR_PREL_LO21:
          case R_AARCH64_CONDBR19:
          case R_AARCH64_LD_PREL_LO19:
          case R_AARCH64_CALL26:
          case R_AARCH64_JUMP26:
            //
            // The GCC toolchains (i.e., binutils) may corrupt section relative
            // relocations when emitting relocation sections into fully linked
            // binaries. More specifically, they tend to fail to take into
            // account the fact that a '.rodata + XXX' relocation needs to have
            // its addend recalculated once .rodata is merged into the .text
            // section, and the relocation emitted into the .rela.text section.
            //
            // We cannot really recover from this loss of information, so the
            // only workaround is to prevent having to recalculate any relative
            // relocations at all, by using a linker script that ensures that
            // the offset between the Place and the Symbol is the same in both
            // the ELF and the PE/COFF versions of the binary.
            //
            if ((SymShdr->sh_addr - SecShdr->sh_addr) !=
                (mCoffSectionsOffset[Sym->st_shndx] - SecOffset)) {
              Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s AARCH64 relative relocations require identical ELF and PE/COFF section offsets", mInImageName);
            }
            break;

          // Absolute relocations.
          case R_AARCH64_ABS64:
            *(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];
            break;

          default:
            Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s unsupported ELF EM_AARCH64 relocation 0x%x.", mInImageName, (unsigned) ELF_R_TYPE(Rel->r_info));
          }
        } else {
          Error (NULL, 0, 3000, "Invalid", "Not a supported machine type");
        }
      }
    }
  }

  return TRUE;
}
/*
 * Process one elf relocation with addend.
 *
 * Applies a single x86-64 relocation record (`data', either Elf_Rel or
 * Elf_Rela depending on `type') against a module loaded at `relocbase'.
 * Symbol addresses are resolved through the caller-supplied `lookup'
 * callback.  Returns 0 on success, -1 on lookup failure or an
 * unsupported/forbidden relocation type.
 *
 * NOTE(review): the `local' parameter is unused in this variant —
 * presumably kept for signature parity with other architectures; confirm
 * against the callers.
 */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf64_Addr *where, val;
	Elf32_Addr *where32, val32;
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Size rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;

	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf_Addr *) (relocbase + rel->r_offset);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		/* Addend is 32 bit on 32 bit relocs */
		switch (rtype) {
		case R_X86_64_PC32:
		case R_X86_64_32:
		case R_X86_64_32S:
			addend = *(Elf32_Addr *)where;
			break;
		default:
			/* 64-bit relocs carry a full-width in-place addend. */
			addend = *where;
			break;
		}
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

	switch (rtype) {
	case R_X86_64_NONE:	/* none */
		break;

	case R_X86_64_64:	/* S + A */
		if (lookup(lf, symidx, 1, &addr))
			return -1;
		val = addr + addend;
		/* Avoid dirtying the page when the value is already right. */
		if (*where != val)
			*where = val;
		break;

	case R_X86_64_PC32:	/* S + A - P */
		if (lookup(lf, symidx, 1, &addr))
			return -1;
		where32 = (Elf32_Addr *)where;
		val32 = (Elf32_Addr)(addr + addend - (Elf_Addr)where);
		if (*where32 != val32)
			*where32 = val32;
		break;

	case R_X86_64_32:	/* S + A zero extend */
	case R_X86_64_32S:	/* S + A sign extend */
		if (lookup(lf, symidx, 1, &addr))
			return -1;
		val32 = (Elf32_Addr)(addr + addend);
		where32 = (Elf32_Addr *)where;
		if (*where32 != val32)
			*where32 = val32;
		break;

	case R_X86_64_COPY:	/* none */
		/*
		 * There shouldn't be copy relocations in kernel
		 * objects.
		 */
		kprintf("kldload: unexpected R_COPY relocation\n");
		return -1;
		break;

	case R_X86_64_GLOB_DAT:	/* S */
	case R_X86_64_JMP_SLOT:	/* XXX need addend + offset */
		if (lookup(lf, symidx, 1, &addr))
			return -1;
		if (*where != addr)
			*where = addr;
		break;

	case R_X86_64_RELATIVE:	/* B + A */
		addr = relocbase + addend;
		val = addr;
		if (*where != val)
			*where = val;
		break;

	default:
		kprintf("kldload: unexpected relocation type %ld\n",
		    rtype);
		return -1;
	}
	return(0);
}
// Apply a single RELA relocation entry to the loaded image.
//
// |rela| is the relocation record, |sym_addr| the already-resolved symbol
// address (0 when unresolved), |resolved| whether resolution succeeded
// (unused here), and |error| receives a message on failure.
// Returns true on success, false on a forbidden or unknown relocation
// type.  Only AArch64 relocation types are handled in this method; other
// architectures fall through to the error path.
bool ElfRelocations::ApplyRelaReloc(const ELF::Rela* rela,
                                    ELF::Addr sym_addr,
                                    bool resolved CRAZY_UNUSED,
                                    Error* error) {
  const ELF::Word rela_type = ELF_R_TYPE(rela->r_info);
  const ELF::Word CRAZY_UNUSED rela_symbol = ELF_R_SYM(rela->r_info);
  const ELF::Sword CRAZY_UNUSED addend = rela->r_addend;

  // The patch location is the file offset rebased by the load bias.
  const ELF::Addr reloc = static_cast<ELF::Addr>(rela->r_offset + load_bias_);

  RLOG(" rela reloc=%p offset=%p type=%d addend=%p\n",
       reloc, rela->r_offset, rela_type, addend);

  // Apply the relocation.
  ELF::Addr* CRAZY_UNUSED target = reinterpret_cast<ELF::Addr*>(reloc);
  switch (rela_type) {
#ifdef __aarch64__
    case R_AARCH64_JUMP_SLOT:
      RLOG(" R_AARCH64_JUMP_SLOT target=%p addr=%p\n",
           target, sym_addr + addend);
      *target = sym_addr + addend;
      break;
    case R_AARCH64_GLOB_DAT:
      RLOG(" R_AARCH64_GLOB_DAT target=%p addr=%p\n",
           target, sym_addr + addend);
      *target = sym_addr + addend;
      break;
    case R_AARCH64_ABS64:
      // Absolute: add S + A to the existing word.
      RLOG(" R_AARCH64_ABS64 target=%p (%p) addr=%p\n",
           target, *target, sym_addr + addend);
      *target += sym_addr + addend;
      break;
    case R_AARCH64_RELATIVE:
      RLOG(" R_AARCH64_RELATIVE target=%p (%p) bias=%p\n",
           target, *target, load_bias_ + addend);
      // A RELATIVE relocation must not reference a symbol.
      if (__builtin_expect(rela_symbol, 0)) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target = load_bias_ + addend;
      break;
    case R_AARCH64_COPY:
      // NOTE: These relocations are forbidden in shared libraries.
      RLOG(" R_AARCH64_COPY\n");
      *error = "Invalid R_AARCH64_COPY relocation in shared library";
      return false;
#endif  // __aarch64__
    default:
      error->Format("Invalid relocation type (%d)", rela_type);
      return false;
  }

  return true;
}
/*
 * Process all non-PLT RELA relocations for one loaded object.
 *
 * Walks obj->rela .. obj->relalim, resolving symbols through
 * _rtld_find_symdef() and patching words in place relative to
 * obj->relocbase.  Returns 0 on success, -1 on symbol-lookup failure or
 * an unsupported relocation type.
 */
int
_rtld_relocate_nonplt_objects(Obj_Entry *obj)
{
	const Elf_Rela *rela;

	for (rela = obj->rela; rela < obj->relalim; rela++) {
		Elf_Addr        *where;
		const Elf_Sym   *def;
		const Obj_Entry *defobj;
		Elf_Addr         tmp;
		unsigned long	 symnum;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		symnum = ELF_R_SYM(rela->r_info);

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(32):	/* word32 S + A */
		case R_TYPE(GLOB_DAT):	/* word32 S + A */
			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
			    rela->r_addend);
			/* Skip the write when the slot already holds the value. */
			if (*where != tmp)
				*where = tmp;
			rdbg(("32/GLOB_DAT %s in %s --> %p in %s",
			    obj->strtab + obj->symtab[symnum].st_name,
			    obj->path, (void *)*where, defobj->path));
			break;

		case R_TYPE(RELATIVE):	/* word32 B + A */
			tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);
			if (*where != tmp)
				*where = tmp;
			rdbg(("RELATIVE in %s --> %p", obj->path,
			    (void *)*where));
			break;

		case R_TYPE(COPY):
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (obj->isdynamic) {
				_rtld_error(
			"%s: Unexpected R_COPY relocation in shared library",
				    obj->path);
				return -1;
			}
			rdbg(("COPY (avoid in main)"));
			break;

		default:
			rdbg(("sym = %lu, type = %lu, offset = %p, "
			    "addend = %p, contents = %p, symbol = %s",
			    symnum, (u_long)ELF_R_TYPE(rela->r_info),
			    (void *)rela->r_offset, (void *)rela->r_addend,
			    (void *)*where,
			    obj->strtab + obj->symtab[symnum].st_name));
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rela->r_info));
			return -1;
		}
	}
	return 0;
}
/*
 * Relocate the dynamic linker itself (MIPS) before any other code runs.
 *
 * Scans the _DYNAMIC array for the tags needed (REL table, symbol table,
 * GOT and the MIPS-specific GOT bookkeeping counts), rebases the local
 * and global GOT entries by `relocbase', then applies the REL32
 * relocations.  Only local-symbol REL32, GPREL32 and NONE are expected
 * at this stage; anything else aborts.
 *
 * NOTE(review): must not call any function that itself needs relocation;
 * presumably everything used here is position-independent — confirm.
 */
void
_rtld_relocate_nonplt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
{
	const Elf_Rel *rel = NULL, *rellim;
	Elf_Addr relsz = 0;
	const Elf_Sym *symtab = NULL, *sym;
	Elf_Addr *where;
	Elf_Addr *got = NULL;
	Elf_Word local_gotno = 0, symtabno = 0, gotsym = 0;
	size_t i;

	/* Collect the dynamic tags we need; d_ptr values are pre-reloc. */
	for (; dynp->d_tag != DT_NULL; dynp++) {
		switch (dynp->d_tag) {
		case DT_REL:
			rel = (const Elf_Rel *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_RELSZ:
			relsz = dynp->d_un.d_val;
			break;
		case DT_SYMTAB:
			symtab = (const Elf_Sym *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_PLTGOT:
			got = (Elf_Addr *)(relocbase + dynp->d_un.d_ptr);
			break;
		case DT_MIPS_LOCAL_GOTNO:
			local_gotno = dynp->d_un.d_val;
			break;
		case DT_MIPS_SYMTABNO:
			symtabno = dynp->d_un.d_val;
			break;
		case DT_MIPS_GOTSYM:
			gotsym = dynp->d_un.d_val;
			break;
		}
	}

	/* Skip got[0], and got[1] too if it is reserved for rtld's use. */
	i = GOT1_RESERVED_FOR_RTLD(got) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	for (; i < local_gotno; i++) {
		*got++ += relocbase;
	}
	sym = symtab + gotsym;
	/* Now do the global GOT entries */
	for (i = gotsym; i < symtabno; i++) {
		*got = sym->st_value + relocbase;
		++sym;
		++got;
	}

	rellim = (const Elf_Rel *)((const char *)rel + relsz);
	for (; rel < rellim; rel++) {
		Elf_Word r_symndx, r_type;

		where = (void *)(relocbase + rel->r_offset);

		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		/* On n64 the low byte carries the first of up to 3 types. */
		switch (r_type & 0xff) {
		case R_TYPE(REL32): {
			/* 32- or 64-bit field depending on the stacked type. */
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;
#ifdef __mips_n64
			assert(r_type == R_TYPE(REL32)
			    || r_type == (R_TYPE(REL32)|(R_TYPE(64) << 8)));
#endif
			/* Self-relocation must only reference local symbols. */
			assert(r_symndx < gotsym);
			sym = symtab + r_symndx;
			assert(ELF_ST_BIND(sym->st_info) == STB_LOCAL);
			val += relocbase;
			dbg("REL32/L(%p) %p -> %p in <self>",
			    where, (void *)old, (void *)val);
			store_ptr(where, val, rlen);
			break;
		}
		case R_TYPE(GPREL32):
		case R_TYPE(NONE):
			break;

		default:
			abort();
			break;
		}
	}
}
/*
 * Process all non-PLT relocations for one loaded object (MIPS REL
 * variant).
 *
 * Phase 1 rebases the local GOT entries and resolves the global GOT
 * entries (with workarounds for old broken GNU ld when
 * SUPPORT_OLD_BROKEN_LD is defined).  Phase 2 walks the REL table and
 * applies REL32 and TLS relocations.  Returns 0 on success, -1 on
 * symbol-lookup/TLS-allocation failure or an unsupported type.
 */
int
_rtld_relocate_nonplt_objects(Obj_Entry *obj)
{
	const Elf_Rel *rel;
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *sym, *def;
	const Obj_Entry *defobj;
	Elf_Word i;
#ifdef SUPPORT_OLD_BROKEN_LD
	int broken;
#endif

#ifdef SUPPORT_OLD_BROKEN_LD
	/*
	 * Detect objects produced by old broken linkers by sniffing the
	 * first few symbol-table entries.
	 */
	broken = 0;
	sym = obj->symtab;
	for (i = 1; i < 12; i++)
		if (sym[i].st_info == ELF_ST_INFO(STB_LOCAL, STT_NOTYPE))
			broken = 1;
	dbg(("%s: broken=%d", obj->path, broken));
#endif

	/* got[1] is reserved for rtld when its high bit is set. */
	i = (got[1] & 0x80000000) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	for (; i < obj->local_gotno; i++)
		*got++ += (Elf_Addr)obj->relocbase;
	sym = obj->symtab + obj->gotsym;
	/* Now do the global GOT entries */
	for (i = obj->gotsym; i < obj->symtabno; i++) {
		rdbg((" doing got %d sym %p (%s, %lx)",
		    i - obj->gotsym, sym,
		    sym->st_name + obj->strtab,
		    (u_long) *got));

#ifdef SUPPORT_OLD_BROKEN_LD
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    broken && sym->st_shndx == SHN_UNDEF) {
			/*
			 * XXX DANGER WILL ROBINSON!
			 * You might think this is stupid, as it intentionally
			 * defeats lazy binding -- and you'd be right.
			 * Unfortunately, for lazy binding to work right, we
			 * need to a way to force the GOT slots used for
			 * function pointers to be resolved immediately.  This
			 * is supposed to be done automatically by the linker,
			 * by not outputting a PLT slot and setting st_value
			 * to 0 if there are non-PLT references, but older
			 * versions of GNU ld do not do this.
			 */
			def = _rtld_find_symdef(i, obj, &defobj, false);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		} else
#endif
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    sym->st_value != 0 && sym->st_shndx == SHN_UNDEF) {
			/*
			 * If there are non-PLT references to the function,
			 * st_value should be 0, forcing us to resolve the
			 * address immediately.
			 *
			 * XXX DANGER WILL ROBINSON!
			 * The linker is not outputting PLT slots for calls to
			 * functions that are defined in the same shared
			 * library.  This is a bug, because it can screw up
			 * link ordering rules if the symbol is defined in
			 * more than one module.  For now, if there is a
			 * definition, we fail the test above and force a full
			 * symbol lookup.  This means that all intra-module
			 * calls are bound immediately.  - mycroft, 2003/09/24
			 */
			*got = sym->st_value + (Elf_Addr)obj->relocbase;
		} else if (sym->st_info == ELF_ST_INFO(STB_GLOBAL, STT_SECTION)) {
			/* Symbols with index SHN_ABS are not relocated. */
			if (sym->st_shndx != SHN_ABS)
				*got = sym->st_value +
				    (Elf_Addr)obj->relocbase;
		} else {
			/* Everything else goes through a full symbol lookup. */
			def = _rtld_find_symdef(i, obj, &defobj, false);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		}

		rdbg((" --> now %lx", (u_long) *got));
		++sym;
		++got;
	}

	got = obj->pltgot;
	for (rel = obj->rel; rel < obj->rellim; rel++) {
		Elf_Word	r_symndx, r_type;
		void		*where;

		where = obj->relocbase + rel->r_offset;
		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		/* On n64 the low byte carries the first stacked type. */
		switch (r_type & 0xff) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(REL32): {
			/* 32-bit PC-relative reference */
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;

			def = obj->symtab + r_symndx;

			if (r_symndx >= obj->gotsym) {
				/* Global symbol: take its value from the GOT. */
				val += got[obj->local_gotno + r_symndx - obj->gotsym];
				rdbg(("REL32/G(%p) %p --> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path));
			} else {
				/*
				 * XXX: ABI DIFFERENCE!
				 *
				 * Old NetBSD binutils would generate shared
				 * libs with section-relative relocations being
				 * already adjusted for the start address of
				 * the section.
				 *
				 * New binutils, OTOH, generate shared libs
				 * with the same relocations being based at
				 * zero, so we need to add in the start address
				 * of the section.
				 *
				 * --rkb, Oct 6, 2001
				 */
				if (def->st_info ==
				    ELF_ST_INFO(STB_LOCAL, STT_SECTION)
#ifdef SUPPORT_OLD_BROKEN_LD
				    && !broken
#endif
				    )
					val += (Elf_Addr)def->st_value;

				val += (Elf_Addr)obj->relocbase;

				rdbg(("REL32/L(%p) %p -> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path));
			}
			store_ptr(where, val, rlen);
			break;
		}

#if ELFSIZE == 64
		case R_TYPE(TLS_DTPMOD64):
#else
		case R_TYPE(TLS_DTPMOD32):
#endif
		{
			/* TLS module ID of the defining object. */
			Elf_Addr old = load_ptr(where, ELFSIZE / 8);
			Elf_Addr val = old;

			def = _rtld_find_symdef(r_symndx, obj, &defobj, false);
			if (def == NULL)
				return -1;

			val += (Elf_Addr)defobj->tlsindex;

			store_ptr(where, val, ELFSIZE / 8);
			rdbg(("DTPMOD %s in %s --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, defobj->path));
			break;
		}

#if ELFSIZE == 64
		case R_TYPE(TLS_DTPREL64):
#else
		case R_TYPE(TLS_DTPREL32):
#endif
		{
			/* Offset of the symbol within its TLS block. */
			Elf_Addr old = load_ptr(where, ELFSIZE / 8);
			Elf_Addr val = old;

			def = _rtld_find_symdef(r_symndx, obj, &defobj, false);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && _rtld_tls_offset_allocate(obj))
				return -1;

			val += (Elf_Addr)def->st_value - TLS_DTV_OFFSET;
			store_ptr(where, val, ELFSIZE / 8);

			rdbg(("DTPREL %s in %s --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, defobj->path));
			break;
		}

#if ELFSIZE == 64
		case R_TYPE(TLS_TPREL64):
#else
		case R_TYPE(TLS_TPREL32):
#endif
		{
			/* Static-TLS offset relative to the thread pointer. */
			Elf_Addr old = load_ptr(where, ELFSIZE / 8);
			Elf_Addr val = old;

			def = _rtld_find_symdef(r_symndx, obj, &defobj, false);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && _rtld_tls_offset_allocate(obj))
				return -1;

			val += (Elf_Addr)(def->st_value + defobj->tlsoffset
			    - TLS_TP_OFFSET);
			store_ptr(where, val, ELFSIZE / 8);

			rdbg(("TPREL %s in %s --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, where, defobj->path));
			break;
		}

		default:
			rdbg(("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    (u_long)r_symndx, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset,
			    (void *)load_ptr(where, sizeof(Elf_Sword)),
			    obj->strtab + obj->symtab[r_symndx].st_name));
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}

	return 0;
}
/*
 * extract an ELF REL table
 * - need to canonicalise the entries in case section addition/removal has
 *   rearranged the symbol table and the section table
 *
 * Each REL entry is folded into the running digest as a packed record
 * joining the relocation with its referenced symbol; undefined symbols
 * additionally contribute their name so the digest is stable across
 * symbol-table reshuffles.  Always returns 0.
 */
static int extract_elf_rel(struct module_verify_data *mvdata,
			   int secix,
			   const Elf_Rel *reltab, size_t nrels,
			   const char *sh_name)
{
	/* Packed, layout-stable record that is digested per relocation. */
	struct {
#if defined(MODULES_ARE_ELF32)
		uint32_t	r_offset;
		uint32_t	st_value;
		uint32_t	st_size;
		uint16_t	st_shndx;
		uint8_t		r_type;
		uint8_t		st_info;
		uint8_t		st_other;
#elif defined(MODULES_ARE_ELF64)
		uint64_t	r_offset;
		uint64_t	st_value;
		uint64_t	st_size;
		uint32_t	r_type;
		uint16_t	st_shndx;
		uint8_t		st_info;
		uint8_t		st_other;
#else
#error unsupported module type
#endif
	} __attribute__((packed)) relocation;

	const Elf_Rel *reloc;
	const Elf_Sym *symbol;
	size_t loop;

	/* contribute the relevant bits from a join of { REL, SYMBOL, SECTION } */
	for (loop = 0; loop < nrels; loop++) {
		int st_shndx;

		reloc = &reltab[loop];

		/* decode the relocation */
		relocation.r_offset = reloc->r_offset;
		relocation.r_type = ELF_R_TYPE(reloc->r_info);

		/* decode the symbol referenced by the relocation */
		symbol = &mvdata->symbols[ELF_R_SYM(reloc->r_info)];
		relocation.st_info = symbol->st_info;
		relocation.st_other = symbol->st_other;
		relocation.st_value = symbol->st_value;
		relocation.st_size = symbol->st_size;
		relocation.st_shndx = symbol->st_shndx;
		st_shndx = symbol->st_shndx;

		/* canonicalise the section used by the symbol */
		if (st_shndx > SHN_UNDEF && st_shndx < mvdata->nsects)
			relocation.st_shndx = mvdata->canonmap[st_shndx];

		crypto_digest_update_val(mvdata, relocation);

		/* undefined symbols must be named if referenced */
		if (st_shndx == SHN_UNDEF) {
			const char *name = mvdata->strings + symbol->st_name;
			crypto_digest_update_data(mvdata,
						  name, strlen(name) + 1);
		}
	}

	_debug("%08zx %02x digested the %s section, nrels %zu\n",
	       mvdata->signed_size, mvdata->csum, sh_name, nrels);

	return 0;
}
/*
 * Apply one Alpha ELF relocation record to a loaded kernel object.
 *
 * `data' points at either an Elf_Rel or an Elf_Rela depending on
 * `isrela'; REL records carry their addend in the word being patched.
 * Symbols are resolved via kobj_sym_lookup().  Returns 0 on success,
 * -1 on lookup failure or a forbidden/unknown relocation type.
 */
int
kobj_reloc(kobj_t ko, uintptr_t relocbase, const void *data,
	   bool isrela, bool local)
{
	Elf_Addr *target;
	Elf_Addr value;
	Elf_Addr addend;
	uintptr_t rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;

	/* Decode the record into a common (target, addend, type, sym) form. */
	if (isrela) {
		rela = (const Elf_Rela *)data;
		target = (Elf_Addr *)(relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
	} else {
		rel = (const Elf_Rel *)data;
		target = (Elf_Addr *)(relocbase + rel->r_offset);
		addend = *target;
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
	}

	switch (rtype) {
	case R_ALPHA_NONE:
		break;

	case R_ALPHA_REFQUAD:		/* S + A */
	case R_ALPHA_GLOB_DAT:		/* S + A */
		value = kobj_sym_lookup(ko, symidx);
		if (value == 0)
			return -1;
		value += addend;
		/* Avoid the write when the slot already holds the value. */
		if (*target != value)
			*target = value;
		break;

	case R_ALPHA_JMP_SLOT:
		/* No point in lazy binding for kernel modules. */
		value = kobj_sym_lookup(ko, symidx);
		if (value == 0)
			return -1;
		if (*target != value)
			*target = value;
		break;

	case R_ALPHA_RELATIVE:		/* B + A */
		value = relocbase + addend;
		if (*target != value)
			*target = value;
		break;

	case R_ALPHA_COPY:
		/*
		 * There shouldn't be copy relocations in kernel
		 * objects.
		 */
		printf("kobj_reloc: unexpected R_COPY relocation\n");
		return -1;

	default:
		printf("kobj_reloc: unexpected relocation type %d\n",
		    (int)rtype);
		return -1;
	}

	return 0;
}
/* Relocate symbols. */
/*
 * Apply the x86-64 RELA relocations in section `s' of module image
 * `ehdr' to the already-loaded segment `seg'.  Narrowing relocations
 * (PC32, 32, 32S) are range-checked and rejected when the value does
 * not fit.  Returns GRUB_ERR_NONE on success, or a grub_error() for an
 * out-of-segment offset, an out-of-range value, or an unimplemented
 * relocation type.
 */
grub_err_t
grub_arch_dl_relocate_symbols (grub_dl_t mod, void *ehdr,
			       Elf_Shdr *s, grub_dl_segment_t seg)
{
  Elf64_Rela *rel, *max;

  for (rel = (Elf64_Rela *) ((char *) ehdr + s->sh_offset),
	 max = (Elf64_Rela *) ((char *) rel + s->sh_size);
       rel < max;
       rel = (Elf64_Rela *) ((char *) rel + s->sh_entsize))
    {
      Elf64_Word *addr32;
      Elf64_Xword *addr64;
      Elf64_Sym *sym;

      /* Reject relocations that point past the segment. */
      if (seg->size < rel->r_offset)
	return grub_error (GRUB_ERR_BAD_MODULE,
			   "reloc offset is out of the segment");

      /* Two views of the patch location: 32-bit and 64-bit. */
      addr32 = (Elf64_Word *) ((char *) seg->addr + rel->r_offset);
      addr64 = (Elf64_Xword *) addr32;
      sym = (Elf64_Sym *) ((char *) mod->symtab
			   + mod->symsize * ELF_R_SYM (rel->r_info));

      switch (ELF_R_TYPE (rel->r_info))
	{
	case R_X86_64_64:	/* S + A, full width */
	  *addr64 += rel->r_addend + sym->st_value;
	  break;

	case R_X86_64_PC32:	/* S + A - P, must fit in signed 32 bits */
	  {
	    grub_int64_t value;
	    value = ((grub_int32_t) *addr32) + rel->r_addend + sym->st_value -
	      (Elf64_Xword) (grub_addr_t) seg->addr - rel->r_offset;
	    if (value != (grub_int32_t) value)
	      return grub_error (GRUB_ERR_BAD_MODULE,
				 "relocation out of range");
	    *addr32 = value;
	  }
	  break;

	case R_X86_64_PC64:	/* S + A - P, full width */
	  {
	    *addr64 += rel->r_addend + sym->st_value -
	      (Elf64_Xword) (grub_addr_t) seg->addr - rel->r_offset;
	  }
	  break;

	case R_X86_64_32:	/* S + A, zero-extended 32 bits */
	  {
	    grub_uint64_t value = *addr32 + rel->r_addend + sym->st_value;
	    if (value != (grub_uint32_t) value)
	      return grub_error (GRUB_ERR_BAD_MODULE,
				 "relocation out of range");
	    *addr32 = value;
	  }
	  break;

	case R_X86_64_32S:	/* S + A, sign-extended 32 bits */
	  {
	    grub_int64_t value = ((grub_int32_t) *addr32) + rel->r_addend
	      + sym->st_value;
	    if (value != (grub_int32_t) value)
	      return grub_error (GRUB_ERR_BAD_MODULE,
				 "relocation out of range");
	    *addr32 = value;
	  }
	  break;

	default:
	  return grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET,
			     N_("relocation 0x%x is not implemented yet"),
			     ELF_R_TYPE (rel->r_info));
	}
    }

  return GRUB_ERR_NONE;
}
/*
 * Process one elf relocation with addend.
 *
 * ARM kernel-module variant.  `data' is an Elf_Rel or Elf_Rela depending
 * on `type'; REL entries carry the addend in place (read via load_ptr to
 * tolerate unaligned targets).  With `local' set only R_ARM_RELATIVE is
 * applied and anything else is ignored.  Returns 0 on success, -1 on
 * lookup failure or a forbidden/unknown relocation type.
 */
static int
elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data,
    int type, int local, elf_lookup_fn lookup)
{
	Elf_Addr *where;
	Elf_Addr addr;
	Elf_Addr addend;
	Elf_Word rtype, symidx;
	const Elf_Rel *rel;
	const Elf_Rela *rela;

	switch (type) {
	case ELF_RELOC_REL:
		rel = (const Elf_Rel *)data;
		where = (Elf_Addr *) (relocbase + rel->r_offset);
		/* In-place addend; load_ptr handles unaligned access. */
		addend = load_ptr(where);
		rtype = ELF_R_TYPE(rel->r_info);
		symidx = ELF_R_SYM(rel->r_info);
		break;
	case ELF_RELOC_RELA:
		rela = (const Elf_Rela *)data;
		where = (Elf_Addr *) (relocbase + rela->r_offset);
		addend = rela->r_addend;
		rtype = ELF_R_TYPE(rela->r_info);
		symidx = ELF_R_SYM(rela->r_info);
		break;
	default:
		panic("unknown reloc type %d\n", type);
	}

	if (local) {
		/* Local pass: only base-relative fixups are processed. */
		if (rtype == R_ARM_RELATIVE) {	/* A + B */
			addr = elf_relocaddr(lf, relocbase + addend);
			if (load_ptr(where) != addr)
				store_ptr(where, addr);
		}
		return (0);
	}

	switch (rtype) {
	case R_ARM_NONE:	/* none */
		break;

	case R_ARM_ABS32:	/* S + A */
		addr = lookup(lf, symidx, 1);
		if (addr == 0)
			return -1;
		store_ptr(where, addr + load_ptr(where));
		break;

	case R_ARM_COPY:	/* none */
		/*
		 * There shouldn't be copy relocations in kernel
		 * objects.
		 */
		printf("kldload: unexpected R_COPY relocation\n");
		return -1;
		break;

	case R_ARM_JUMP_SLOT:	/* S */
		addr = lookup(lf, symidx, 1);
		if (addr) {
			store_ptr(where, addr);
			return (0);
		}
		return (-1);

	case R_ARM_RELATIVE:
		/* Already handled in the `local' pass. */
		break;

	default:
		printf("kldload: unexpected relocation type %d\n", rtype);
		return -1;
	}
	return(0);
}
__attribute__((__visibility__("hidden"))) struct funcdesc_value volatile * _dl_linux_resolver (struct elf_resolve *tpnt, int reloc_entry) { int reloc_type; ELF_RELOC *this_reloc; char *strtab; ElfW(Sym) *symtab; int symtab_index; char *rel_addr; struct elf_resolve *new_tpnt; char *new_addr; struct funcdesc_value funcval; struct funcdesc_value volatile *got_entry; char *symname; rel_addr = (char *)tpnt->dynamic_info[DT_JMPREL]; this_reloc = (ELF_RELOC *)(intptr_t)(rel_addr + reloc_entry); reloc_type = ELF_R_TYPE(this_reloc->r_info); symtab_index = ELF_R_SYM(this_reloc->r_info); symtab = (Elf32_Sym *) tpnt->dynamic_info[DT_SYMTAB]; strtab = (char *) tpnt->dynamic_info[DT_STRTAB]; symname= strtab + symtab[symtab_index].st_name; if (reloc_type != R_UBICOM32_FUNCDESC_VALUE) { _dl_dprintf(2, "%s: Incorrect relocation type in jump relocations\n", _dl_progname); _dl_exit(1); } /* Address of GOT entry fix up */ got_entry = (struct funcdesc_value *) DL_RELOC_ADDR(tpnt->loadaddr, this_reloc->r_offset); /* Get the address to be used to fill in the GOT entry. 
*/ new_addr = _dl_lookup_hash(symname, tpnt->symbol_scope, NULL, 0, &new_tpnt); if (!new_addr) { new_addr = _dl_lookup_hash(symname, NULL, NULL, 0, &new_tpnt); if (!new_addr) { _dl_dprintf(2, "_dl_linux_resolver: %s: can't resolve symbol '%s'\n", _dl_progname, symname); _dl_exit(1); } } funcval.entry_point = new_addr; funcval.got_value = new_tpnt->loadaddr.got_value; #if defined (__SUPPORT_LD_DEBUG__) if (_dl_debug_bindings) { _dl_dprintf(_dl_debug_file, "\nresolve function: %s", symname); if (_dl_debug_detail) _dl_dprintf(_dl_debug_file, "\n\tpatched (%x,%x) ==> (%x,%x) @ %x\n", got_entry->entry_point, got_entry->got_value, funcval.entry_point, funcval.got_value, got_entry); } if (1 || !_dl_debug_nofixups) { got_entry->entry_point = ((void *)&_dl_ubicom32_resolve_pending); got_entry->got_value = funcval.got_value; got_entry->entry_point = funcval.entry_point; } #else /* * initially set the entry point to resolve pending before starting * the update. This has the effect of putting all other requests in a * holding pattern until the resolution is completed. */ got_entry->entry_point = ((void*)&_dl_ubicom32_resolve_pending); got_entry->got_value = funcval.got_value; got_entry->entry_point = funcval.entry_point; #endif return got_entry; }
// Apply a single REL-format relocation (implicit addend stored at the
// target location).  |sym_addr| is the already-resolved symbol address
// (0 when unresolved); |resolved| tells MIPS whether the symbol lookup
// succeeded.  Returns false and sets |*error| on unsupported or invalid
// relocation types.
bool ElfRelocations::ApplyRelReloc(const ELF::Rel* rel,
                                   ELF::Addr sym_addr,
                                   bool resolved CRAZY_UNUSED,
                                   Error* error) {
  const ELF::Word rel_type = ELF_R_TYPE(rel->r_info);
  const ELF::Word CRAZY_UNUSED rel_symbol = ELF_R_SYM(rel->r_info);

  // Absolute address of the word being patched (load bias applied).
  const ELF::Addr reloc = static_cast<ELF::Addr>(rel->r_offset + load_bias_);
  RLOG("  rel reloc=%p offset=%p type=%d\n", reloc, rel->r_offset, rel_type);

  // Apply the relocation.
  ELF::Addr* CRAZY_UNUSED target = reinterpret_cast<ELF::Addr*>(reloc);
  switch (rel_type) {
#ifdef __arm__
    case R_ARM_JUMP_SLOT:
      // PLT slot: store the symbol address directly.
      RLOG("  R_ARM_JUMP_SLOT target=%p addr=%p\n", target, sym_addr);
      *target = sym_addr;
      break;
    case R_ARM_GLOB_DAT:
      // GOT entry: store the symbol address directly.
      RLOG("  R_ARM_GLOB_DAT target=%p addr=%p\n", target, sym_addr);
      *target = sym_addr;
      break;
    case R_ARM_ABS32:
      // S + A: add symbol address to the in-place addend.
      RLOG("  R_ARM_ABS32 target=%p (%p) addr=%p\n",
           target,
           *target,
           sym_addr);
      *target += sym_addr;
      break;
    case R_ARM_REL32:
      // S + A - P (the original r_offset, pre-bias, is subtracted here).
      RLOG("  R_ARM_REL32 target=%p (%p) addr=%p offset=%p\n",
           target,
           *target,
           sym_addr,
           rel->r_offset);
      *target += sym_addr - rel->r_offset;
      break;
    case R_ARM_RELATIVE:
      // B + A: a relative relocation must not reference a symbol.
      RLOG("  R_ARM_RELATIVE target=%p (%p) bias=%p\n",
           target,
           *target,
           load_bias_);
      if (__builtin_expect(rel_symbol, 0)) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target += load_bias_;
      break;
    case R_ARM_COPY:
      // NOTE: These relocations are forbidden in shared libraries.
      // The Android linker has special code to deal with this, which
      // is not needed here.
      RLOG("  R_ARM_COPY\n");
      *error = "Invalid R_ARM_COPY relocation in shared library";
      return false;
#endif  // __arm__

#ifdef __i386__
    case R_386_JMP_SLOT:
      *target = sym_addr;
      break;
    case R_386_GLOB_DAT:
      *target = sym_addr;
      break;
    case R_386_RELATIVE:
      if (rel_symbol) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target += load_bias_;
      break;
    case R_386_32:
      *target += sym_addr;
      break;
    case R_386_PC32:
      // S + A - P, P being the biased target address.
      *target += (sym_addr - reloc);
      break;
#endif  // __i386__

#ifdef __mips__
    case R_MIPS_REL32:
      // Resolved symbols get the symbol address; unresolved ones are
      // treated as local and rebased by the load bias.
      if (resolved)
        *target += sym_addr;
      else
        *target += load_bias_;
      break;
#endif  // __mips__

    default:
      error->Format("Invalid relocation type (%d)", rel_type);
      return false;
  }

  return true;
}
static int relocate_file(linker_file_t lf) { elf_file_t ef = lf->priv; const Elf_Rel *rellim; const Elf_Rel *rel; const Elf_Rela *relalim; const Elf_Rela *rela; const char *symname; const Elf_Sym *sym; int i; Elf_Size symidx; Elf_Addr base; /* Perform relocations without addend if there are any: */ for (i = 0; i < ef->nreltab; i++) { rel = ef->reltab[i].rel; if (rel == NULL) panic("lost a reltab!"); rellim = rel + ef->reltab[i].nrel; base = findbase(ef, ef->reltab[i].sec); if (base == 0) panic("lost base for reltab"); for ( ; rel < rellim; rel++) { symidx = ELF_R_SYM(rel->r_info); if (symidx >= ef->ddbsymcnt) continue; sym = ef->ddbsymtab + symidx; /* Local relocs are already done */ if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) continue; if (elf_reloc(lf, base, rel, ELF_RELOC_REL, elf_obj_lookup)) { symname = symbol_name(ef, rel->r_info); kprintf("link_elf_obj_obj: symbol %s undefined\n", symname); return ENOENT; } } } /* Perform relocations with addend if there are any: */ for (i = 0; i < ef->nrelatab; i++) { rela = ef->relatab[i].rela; if (rela == NULL) panic("lost a relatab!"); relalim = rela + ef->relatab[i].nrela; base = findbase(ef, ef->relatab[i].sec); if (base == 0) panic("lost base for relatab"); for ( ; rela < relalim; rela++) { symidx = ELF_R_SYM(rela->r_info); if (symidx >= ef->ddbsymcnt) continue; sym = ef->ddbsymtab + symidx; /* Local relocs are already done */ if (ELF_ST_BIND(sym->st_info) == STB_LOCAL) continue; if (elf_reloc(lf, base, rela, ELF_RELOC_RELA, elf_obj_lookup)) { symname = symbol_name(ef, rela->r_info); kprintf("link_elf_obj_obj: symbol %s undefined\n", symname); return ENOENT; } } } return 0; }
void *elf_hook(char const *module_filename, void const *module_address, char const *name, void const *substitution) { static size_t pagesize; int descriptor; //file descriptor of shared module Elf_Shdr *dynsym = NULL, // ".dynsym" section header *rel_plt = NULL, // ".rel.plt" section header *rel_dyn = NULL; // ".rel.dyn" section header Elf_Sym *symbol = NULL; //symbol table entry for symbol named "name" Elf_Rel *rel_plt_table = NULL, //array with ".rel.plt" entries *rel_dyn_table = NULL; //array with ".rel.dyn" entries size_t i, name_index, //index of symbol named "name" in ".dyn.sym" rel_plt_amount, // amount of ".rel.plt" entries rel_dyn_amount, // amount of ".rel.dyn" entries *name_address = NULL; //address of relocation for symbol named "name" void *original = NULL; //address of the symbol being substituted if (NULL == module_address || NULL == name || NULL == substitution) return original; if (!pagesize) pagesize = sysconf(_SC_PAGESIZE); descriptor = open(module_filename, O_RDONLY); if (descriptor < 0) return original; if ( section_by_type(descriptor, SHT_DYNSYM, &dynsym) || //get ".dynsym" section symbol_by_name(descriptor, dynsym, name, &symbol, &name_index) || //actually, we need only the index of symbol named "name" in the ".dynsym" table section_by_name(descriptor, REL_PLT, &rel_plt) || //get ".rel.plt" (for 32-bit) or ".rela.plt" (for 64-bit) section section_by_name(descriptor, REL_DYN, &rel_dyn) //get ".rel.dyn" (for 32-bit) or ".rela.dyn" (for 64-bit) section ) { //if something went wrong free(dynsym); free(rel_plt); free(rel_dyn); free(symbol); close(descriptor); return original; } //release the data used free(dynsym); free(symbol); rel_plt_table = (Elf_Rel *)(((size_t)module_address) + rel_plt->sh_addr); //init the ".rel.plt" array rel_plt_amount = rel_plt->sh_size / sizeof(Elf_Rel); //and get its size rel_dyn_table = (Elf_Rel *)(((size_t)module_address) + rel_dyn->sh_addr); //init the ".rel.dyn" array rel_dyn_amount = rel_dyn->sh_size / 
sizeof(Elf_Rel); //and get its size //release the data used free(rel_plt); free(rel_dyn); //and descriptor close(descriptor); //now we've got ".rel.plt" (needed for PIC) table and ".rel.dyn" (for non-PIC) table and the symbol's index for (i = 0; i < rel_plt_amount; ++i) //lookup the ".rel.plt" table if (ELF_R_SYM(rel_plt_table[i].r_info) == name_index) //if we found the symbol to substitute in ".rel.plt" { name_address = (size_t *)(((size_t)module_address) + rel_plt_table[i].r_offset); original = (void *)*name_address; mprotect((void *)(((size_t)name_address) & (((size_t)-1) ^ (pagesize - 1))), pagesize, PROT_READ | PROT_WRITE); //mark a memory page that contains the relocation as writable *name_address = (size_t)substitution; //and replace it with the substitutional mprotect((void *)(((size_t)name_address) & (((size_t)-1) ^ (pagesize - 1))), pagesize, PROT_READ | PROT_EXEC); //mark a memory page that contains the relocation as executable break; //the target symbol appears in ".rel.plt" only once } if (original) return original; //we will get here only with 32-bit non-PIC module for (i = 0; i < rel_dyn_amount; ++i) //lookup the ".rel.dyn" table if (ELF_R_SYM(rel_dyn_table[i].r_info) == name_index) //if we found the symbol to substitute in ".rel.dyn" { name_address = (size_t *)(((size_t)module_address) + rel_dyn_table[i].r_offset); //get the relocation address (address of a relative CALL (0xE8) instruction's argument) if (!original) original = (void *)(*name_address + (size_t)name_address + sizeof(size_t)); //calculate an address of the original function by a relative CALL (0xE8) instruction's argument mprotect((void *)(((size_t)name_address) & (((size_t)-1) ^ (pagesize - 1))), pagesize, PROT_READ | PROT_WRITE); //mark a memory page that contains the relocation as writable if (errno) return NULL; *name_address = (size_t)substitution - (size_t)name_address - sizeof(size_t); //calculate a new relative CALL (0xE8) instruction's argument for the substitutional function 
and write it down mprotect((void *)(((size_t)name_address) & (((size_t)-1) ^ (pagesize - 1))), pagesize, PROT_READ | PROT_EXEC); //mark a memory page that contains the relocation back as executable if (errno) //if something went wrong { *name_address = (size_t)original - (size_t)name_address - sizeof(size_t); //then restore the original function address return NULL; } } return original; }
/* Process one elf relocation with addend. */ int elf_reloc(linker_file_t lf, const void *data, int type) { Elf_Addr relocbase = (Elf_Addr) lf->address; Elf_Addr *where; Elf_Addr addr; Elf_Addr addend; Elf_Word rtype, symidx; const Elf_Rel *rel; const Elf_Rela *rela; switch (type) { case ELF_RELOC_REL: rel = (const Elf_Rel *)data; where = (Elf_Addr *) (relocbase + rel->r_offset); addend = *where; rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); break; case ELF_RELOC_RELA: rela = (const Elf_Rela *)data; where = (Elf_Addr *) (relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); break; default: panic("elf_reloc: unknown relocation mode %d\n", type); } switch (rtype) { case R_PPC_NONE: break; case R_PPC_GLOB_DAT: addr = elf_lookup(lf, symidx, 1); if (addr == 0) return -1; addr += addend; if (*where != addr) *where = addr; break; case R_PPC_JMP_SLOT: /* No point in lazy binding for kernel modules. */ addr = elf_lookup(lf, symidx, 1); if (addr == 0) return -1; if (*where != addr) *where = addr; break; case R_PPC_RELATIVE: addr = relocbase + addend + *where; if (*where != addr) *where = addr; break; case R_PPC_COPY: /* * There shouldn't be copy relocations in kernel * objects. */ printf("kldload: unexpected R_COPY relocation\n"); return -1; default: printf("kldload: unexpected relocation type %d\n", (int) rtype); return -1; } return(0); }
/*
 * Apply all non-PLT (REL-format) relocations for an ARM object.
 * Unaligned targets are handled with load_ptr()/store_ptr() instead of
 * direct word access.  Returns 0 on success, -1 on error.
 */
int
_rtld_relocate_nonplt_objects(Obj_Entry *obj)
{
	const Elf_Rel *rel;

	for (rel = obj->rel; rel < obj->rellim; rel++) {
		Elf_Addr *where;
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr tmp;
		unsigned long symnum;

		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		symnum = ELF_R_SYM(rel->r_info);

		switch (ELF_R_TYPE(rel->r_info)) {
		case R_TYPE(NONE):
			break;

#if 1 /* XXX should not occur */
		case R_TYPE(PC24): {	/* word32 S - P + A */
			Elf32_Sword addend;

			/*
			 * Extract addend and sign-extend if needed.
			 * The addend lives in the low 24 bits of the
			 * branch instruction.
			 */
			addend = *where;
			if (addend & 0x00800000)
				addend |= 0xff000000;

			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			/* Branch displacement, in words (addend is pre-shifted). */
			tmp = (Elf_Addr)obj->relocbase + def->st_value
			    - (Elf_Addr)where + (addend << 2);
			/* Reject displacements outside the 24-bit branch range. */
			if ((tmp & 0xfe000000) != 0xfe000000 &&
			    (tmp & 0xfe000000) != 0) {
				_rtld_error(
				"%s: R_ARM_PC24 relocation @ %p to %s failed "
				"(displacement %ld (%#lx) out of range)",
				    obj->path, where,
				    obj->strtab + obj->symtab[symnum].st_name,
				    (long) tmp, (long) tmp);
				return -1;
			}
			tmp >>= 2;
			/* Patch only the 24-bit offset field of the instruction. */
			*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
			rdbg(("PC24 %s in %s --> %p @ %p in %s",
			    obj->strtab + obj->symtab[symnum].st_name, obj->path,
			    (void *)*where, where, defobj->path));
			break;
		}
#endif

		case R_TYPE(ABS32):	/* word32 B + S + A */
		case R_TYPE(GLOB_DAT):	/* word32 B + S */
			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			if (__predict_true(RELOC_ALIGNED_P(where))) {
				tmp = *where + (Elf_Addr)defobj->relocbase +
				    def->st_value;
				/* Set the Thumb bit, if needed. */
				if (ELF_ST_TYPE(def->st_info) == STT_ARM_TFUNC)
					tmp |= 1;
				*where = tmp;
			} else {
				/* Unaligned target: byte-wise access. */
				tmp = load_ptr(where) +
				    (Elf_Addr)defobj->relocbase +
				    def->st_value;
				/* Set the Thumb bit, if needed. */
				if (ELF_ST_TYPE(def->st_info) == STT_ARM_TFUNC)
					tmp |= 1;
				store_ptr(where, tmp);
			}
			rdbg(("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
			    obj->strtab + obj->symtab[symnum].st_name, obj->path,
			    (void *)tmp, where, defobj->path));
			break;

		case R_TYPE(RELATIVE):	/* word32 B + A */
			if (__predict_true(RELOC_ALIGNED_P(where))) {
				tmp = *where + (Elf_Addr)obj->relocbase;
				*where = tmp;
			} else {
				tmp = load_ptr(where) +
				    (Elf_Addr)obj->relocbase;
				store_ptr(where, tmp);
			}
			rdbg(("RELATIVE in %s --> %p", obj->path,
			    (void *)tmp));
			break;

		case R_TYPE(COPY):
			/*
			 * These are deferred until all other relocations have
			 * been done. All we do here is make sure that the
			 * COPY relocation is not in a shared library. They
			 * are allowed only in executable files.
			 */
			if (obj->isdynamic) {
				_rtld_error(
			"%s: Unexpected R_COPY relocation in shared library",
				    obj->path);
				return -1;
			}
			rdbg(("COPY (avoid in main)"));
			break;

#if defined(__HAVE_TLS_VARIANT_I) || defined(__HAVE_TLS_VARIANT_II)
		case R_TYPE(TLS_DTPOFF32):
			/* Offset of the symbol within its TLS block. */
			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			tmp = (Elf_Addr)(def->st_value);
			if (__predict_true(RELOC_ALIGNED_P(where)))
				*where = tmp;
			else
				store_ptr(where, tmp);

			rdbg(("TLS_DTPOFF32 %s in %s --> %p",
			    obj->strtab + obj->symtab[symnum].st_name,
			    obj->path, (void *)tmp));

			break;
		case R_TYPE(TLS_DTPMOD32):
			/* Module index of the defining object. */
			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			tmp = (Elf_Addr)(defobj->tlsindex);
			if (__predict_true(RELOC_ALIGNED_P(where)))
				*where = tmp;
			else
				store_ptr(where, tmp);

			rdbg(("TLS_DTPMOD32 %s in %s --> %p",
			    obj->strtab + obj->symtab[symnum].st_name,
			    obj->path, (void *)tmp));

			break;

		case R_TYPE(TLS_TPOFF32):
			/* Static TLS offset relative to the thread pointer. */
			def = _rtld_find_symdef(symnum, obj, &defobj, false);
			if (def == NULL)
				return -1;

			/* Allocate the object's static TLS offset on demand. */
			if (!defobj->tls_done &&
			    _rtld_tls_offset_allocate(obj))
				return -1;

			tmp = (Elf_Addr)def->st_value + defobj->tlsoffset +
			    sizeof(struct tls_tcb);
			if (__predict_true(RELOC_ALIGNED_P(where)))
				*where = tmp;
			else
				store_ptr(where, tmp);
			rdbg(("TLS_TPOFF32 %s in %s --> %p",
			    obj->strtab + obj->symtab[symnum].st_name,
			    obj->path, (void *)tmp));
			break;
#endif

		default:
			rdbg(("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    symnum, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset, (void *)load_ptr(where),
			    obj->strtab + obj->symtab[symnum].st_name));
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}
	return 0;
}
/* Process the non-PLT relocations. */ int reloc_non_plt(Obj_Entry *obj) { const Elf_Rel *rellim; const Elf_Rel *rel; rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); for (rel = obj->rel; rel < rellim; rel++) { Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset); switch (ELF_R_TYPE(rel->r_info)) { case R_386_NONE: break; case R_386_32: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_PC32: /* * I don't think the dynamic linker should ever see this * type of relocation. But the binutils-2.6 tools sometimes * generate it. */ { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value) - (Elf_Addr) where; } break; case R_386_COPY: /* * These are deferred until all other relocations have * been done. All we do here is make sure that the COPY * relocation is not in a shared library. They are allowed * only in executable files. */ if (!obj->mainprog) { _rtld_error("%s: Unexpected R_386_COPY relocation" " in shared library", obj->path); return -1; } break; case R_386_GLOB_DAT: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false); if (def == NULL) return -1; *where = (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_RELATIVE: *where += (Elf_Addr) obj->relocbase; break; default: _rtld_error("%s: Unsupported relocation type %d" " in non-PLT relocations\n", obj->path, ELF_R_TYPE(rel->r_info)); return -1; } } return 0; }
int kobj_reloc(kobj_t ko, uintptr_t relocbase, const void *data, bool isrela, bool local) { Elf64_Addr *where, val; Elf32_Addr *where32, val32; Elf64_Addr addr; Elf64_Addr addend; uintptr_t rtype, symidx; const Elf_Rel *rel; const Elf_Rela *rela; if (isrela) { rela = (const Elf_Rela *)data; where = (Elf64_Addr *)(relocbase + rela->r_offset); addend = rela->r_addend; rtype = ELF_R_TYPE(rela->r_info); symidx = ELF_R_SYM(rela->r_info); } else { rel = (const Elf_Rel *)data; where = (Elf64_Addr *)(relocbase + rel->r_offset); rtype = ELF_R_TYPE(rel->r_info); symidx = ELF_R_SYM(rel->r_info); /* Addend is 32 bit on 32 bit relocs */ switch (rtype) { case R_X86_64_PC32: case R_X86_64_32S: addend = *(Elf32_Addr *)where; break; default: addend = *where; break; } } switch (rtype) { case R_X86_64_NONE: /* none */ break; case R_X86_64_64: /* S + A */ addr = kobj_sym_lookup(ko, symidx); val = addr + addend; if (addr == 0) return -1; if (*where != val) *where = val; break; case R_X86_64_PC32: /* S + A - P */ addr = kobj_sym_lookup(ko, symidx); where32 = (Elf32_Addr *)where; val32 = (Elf32_Addr)(addr + addend - (Elf64_Addr)where); if (addr == 0) return -1; if (*where32 != val32) *where32 = val32; break; case R_X86_64_32S: /* S + A sign extend */ addr = kobj_sym_lookup(ko, symidx); val32 = (Elf32_Addr)(addr + addend); where32 = (Elf32_Addr *)where; if (addr == 0) return -1; if (*where32 != val32) *where32 = val32; break; case R_X86_64_COPY: /* none */ /* * There shouldn't be copy relocations in kernel * objects. */ printf("kobj_reloc: unexpected R_COPY relocation\n"); return -1; case R_X86_64_GLOB_DAT: /* S */ case R_X86_64_JUMP_SLOT:/* XXX need addend + offset */ addr = kobj_sym_lookup(ko, symidx); if (addr == 0) return -1; if (*where != addr) *where = addr; break; case R_X86_64_RELATIVE: /* B + A */ addr = relocbase + addend; val = addr; if (*where != val) *where = val; break; default: printf("kobj_reloc: unexpected relocation type %ld\n", rtype); return -1; } return 0; }
/*
 * Process the non-PLT relocations for a MIPS object.  MIPS uses the GOT
 * heavily: first the local GOT entries are rebased, then the global GOT
 * entries are resolved, and finally the REL table (REL32/COPY/TLS types)
 * is walked.  Returns 0 on success, -1 on error.
 */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rel;
	const Elf_Rel *rellim;
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *sym, *def;
	const Obj_Entry *defobj;
	Elf_Word i;
#ifdef SUPPORT_OLD_BROKEN_LD
	int broken;
#endif

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);
	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

#ifdef SUPPORT_OLD_BROKEN_LD
	/* Heuristic: detect objects produced by old, broken GNU ld. */
	broken = 0;
	sym = obj->symtab;
	for (i = 1; i < 12; i++)
		if (sym[i].st_info == ELF_ST_INFO(STB_LOCAL, STT_NOTYPE))
			broken = 1;
	dbg("%s: broken=%d", obj->path, broken);
#endif

	/* Skip the GOT entries reserved for the runtime linker. */
	i = GOT1_RESERVED_FOR_RTLD(got) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	dbg("got:%p for %d entries adding %p",
	    got, obj->local_gotno, obj->relocbase);
	for (; i < obj->local_gotno; i++) {
		*got += (Elf_Addr)obj->relocbase;
		got++;
	}
	sym = obj->symtab + obj->gotsym;

	dbg("got:%p for %d entries", got, obj->symtabno);
	/* Now do the global GOT entries */
	for (i = obj->gotsym; i < obj->symtabno; i++) {
		dbg(" doing got %d sym %p (%s, %lx)", i - obj->gotsym, sym,
		    sym->st_name + obj->strtab, (u_long) *got);

#ifdef SUPPORT_OLD_BROKEN_LD
		/*
		 * XXX DANGER WILL ROBINSON!
		 * You might think this is stupid, as it intentionally
		 * defeats lazy binding -- and you'd be right.
		 * Unfortunately, for lazy binding to work right, we
		 * need to a way to force the GOT slots used for
		 * function pointers to be resolved immediately.  This
		 * is supposed to be done automatically by the linker,
		 * by not outputting a PLT slot and setting st_value
		 * to 0 if there are non-PLT references, but older
		 * versions of GNU ld do not do this.
		 */
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    broken && sym->st_shndx == SHN_UNDEF) {
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		} else
#endif
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    sym->st_value != 0 && sym->st_shndx == SHN_UNDEF) {
			/*
			 * If there are non-PLT references to the function,
			 * st_value should be 0, forcing us to resolve the
			 * address immediately.
			 *
			 * XXX DANGER WILL ROBINSON!
			 * The linker is not outputting PLT slots for calls to
			 * functions that are defined in the same shared
			 * library.  This is a bug, because it can screw up
			 * link ordering rules if the symbol is defined in
			 * more than one module.  For now, if there is a
			 * definition, we fail the test above and force a full
			 * symbol lookup.  This means that all intra-module
			 * calls are bound immediately.  - mycroft, 2003/09/24
			 */
			*got = sym->st_value + (Elf_Addr)obj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning2, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
			}

		} else if (sym->st_info == ELF_ST_INFO(STB_GLOBAL, STT_SECTION)) {
			/* Symbols with index SHN_ABS are not relocated. */
			if (sym->st_shndx != SHN_ABS) {
				*got = sym->st_value +
				    (Elf_Addr)obj->relocbase;
				if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
					dbg("Warning3, i:%d maps to relocbase address:%p",
					    i, obj->relocbase);
				}
			}
		} else {
			/* TODO: add cache here */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL) {
				dbg("Warning4, can't find symbole %d", i);
				return -1;
			}
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning4, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
				dbg("via first obj symbol %s",
				    obj->strtab + obj->symtab[i].st_name);
				dbg("found in obj %p:%s",
				    defobj, defobj->path);
			}
		}
		dbg("  --> now %lx", (u_long) *got);
		++sym;
		++got;
	}

	/* Finally, walk the REL table. */
	got = obj->pltgot;
	rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		Elf_Word	r_symndx, r_type;
		void		*where;

		where = obj->relocbase + rel->r_offset;
		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		switch (r_type & 0xff) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(REL32): {
			/* 32-bit PC-relative reference */
			/* On n64, a paired next-type widens the field to 64 bits. */
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;

			def = obj->symtab + r_symndx;

			if (r_symndx >= obj->gotsym) {
				/* Global symbol: add its resolved GOT value. */
				val += got[obj->local_gotno + r_symndx - obj->gotsym];
				dbg("REL32/G(%p) %p --> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			} else {
				/*
				 * XXX: ABI DIFFERENCE!
				 *
				 * Old NetBSD binutils would generate shared
				 * libs with section-relative relocations being
				 * already adjusted for the start address of
				 * the section.
				 *
				 * New binutils, OTOH, generate shared libs
				 * with the same relocations being based at
				 * zero, so we need to add in the start address
				 * of the section.
				 *
				 * --rkb, Oct 6, 2001
				 */
				if (def->st_info ==
				    ELF_ST_INFO(STB_LOCAL, STT_SECTION)
#ifdef SUPPORT_OLD_BROKEN_LD
				    && !broken
#endif
				    )
					val += (Elf_Addr)def->st_value;

				val += (Elf_Addr)obj->relocbase;

				dbg("REL32/L(%p) %p -> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			}
			store_ptr(where, val, rlen);
			break;
		}

		case R_TYPE(COPY):
			/*
			 * These are deferred until all other relocations have
			 * been done.  All we do here is make sure that the
			 * COPY relocation is not in a shared library.  They
			 * are allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_MIPS_COPY "
				    "relocation in shared library", obj->path);
				return (-1);
			}
			break;

#ifdef __mips_n64
		case R_TYPE(TLS_DTPMOD64):
#else
		case R_TYPE(TLS_DTPMOD32):
#endif
		{
			/* TLS module ID of the defining object. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			val += (Elf_Addr)defobj->tlsindex;

			store_ptr(where, val, rlen);
			dbg("DTPMOD %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void*)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPREL64):
#else
		case R_TYPE(TLS_DTPREL32):
#endif
		{
			/* Offset within the symbol's dynamic TLS block. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)def->st_value - TLS_DTP_OFFSET;
			store_ptr(where, val, rlen);

			dbg("DTPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_TPREL64):
#else
		case R_TYPE(TLS_TPREL32):
#endif
		{
			/* Static TLS offset relative to the thread pointer. */
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

			def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)(def->st_value + defobj->tlsoffset
			    - TLS_TP_OFFSET - TLS_TCB_SIZE);
			store_ptr(where, val, rlen);

			dbg("TPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

		default:
			dbg("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    (u_long)r_symndx, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset,
			    (void *)load_ptr(where, sizeof(Elf_Sword)),
			    obj->strtab + obj->symtab[r_symndx].st_name);
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}

	return 0;
}
/*
 * Look up the address of symbol `name` inside `objname` (or vmlinux when
 * objname is NULL) via kallsyms.  `sympos` disambiguates between multiple
 * symbols with the same name: 0 requires the name to be unique, n > 0
 * selects the n-th occurrence.  On success stores the address in *addr and
 * returns 0; otherwise stores 0 and returns -EINVAL.
 */
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	/* module_mutex keeps the module list stable during the walk. */
	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}

/*
 * Resolve the SHN_LIVEPATCH symbols referenced by a klp relocation section.
 * Each symbol is named ".klp.sym.objname.symname,sympos"; the encoded
 * object/name/position are parsed out and resolved through
 * klp_find_object_symbol(), and the result is written into st_value so the
 * ordinary relocation pass can use it.  Returns 0 on success or a negative
 * errno.
 */
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
/*
 * Copy the ELF64 sections selected by FilterType into the output COFF image
 * and apply their RELA relocations, rewriting section-relative values into
 * COFF section offsets.  Supports EM_X86_64 and EM_AARCH64.
 *
 * Returns TRUE on success, FALSE for an unknown filter type; individual
 * relocation problems are reported through Error().
 *
 * Fix: the symbol section-index validity check used `>`, which let the
 * out-of-range index st_shndx == e_shnum slip through (valid indices are
 * 0 .. e_shnum-1); it now uses `>=`.
 */
STATIC
BOOLEAN
WriteSections64 (
  SECTION_FILTER_TYPES  FilterType
  )
{
  UINT32      Idx;
  Elf_Shdr    *SecShdr;
  UINT32      SecOffset;
  BOOLEAN     (*Filter)(Elf_Shdr *);

  //
  // Initialize filter pointer
  //
  switch (FilterType) {
    case SECTION_TEXT:
      Filter = IsTextShdr;
      break;
    case SECTION_HII:
      Filter = IsHiiRsrcShdr;
      break;
    case SECTION_DATA:
      Filter = IsDataShdr;
      break;
    default:
      return FALSE;
  }

  //
  // First: copy sections.
  //
  for (Idx = 0; Idx < mEhdr->e_shnum; Idx++) {
    Elf_Shdr *Shdr = GetShdrByIndex(Idx);
    if ((*Filter)(Shdr)) {
      switch (Shdr->sh_type) {
      case SHT_PROGBITS:
        /* Copy. */
        memcpy(mCoffFile + mCoffSectionsOffset[Idx],
              (UINT8*)mEhdr + Shdr->sh_offset,
              (size_t) Shdr->sh_size);
        break;

      case SHT_NOBITS:
        memset(mCoffFile + mCoffSectionsOffset[Idx], 0, (size_t) Shdr->sh_size);
        break;

      default:
        //
        //  Ignore for unknown section type.
        //
        VerboseMsg ("%s unknown section type %x. We directly copy this section into Coff file", mInImageName, (unsigned)Shdr->sh_type);
        break;
      }
    }
  }

  //
  // Second: apply relocations.
  //
  VerboseMsg ("Applying Relocations...");
  for (Idx = 0; Idx < mEhdr->e_shnum; Idx++) {
    //
    // Determine if this is a relocation section.
    //
    Elf_Shdr *RelShdr = GetShdrByIndex(Idx);
    if ((RelShdr->sh_type != SHT_REL) && (RelShdr->sh_type != SHT_RELA)) {
      continue;
    }

    //
    // Relocation section found.  Now extract section information that the relocations
    // apply to in the ELF data and the new COFF data.
    //
    SecShdr = GetShdrByIndex(RelShdr->sh_info);
    SecOffset = mCoffSectionsOffset[RelShdr->sh_info];

    //
    // Only process relocations for the current filter type.
    //
    if (RelShdr->sh_type == SHT_RELA && (*Filter)(SecShdr)) {
      UINT64 RelIdx;

      //
      // Determine the symbol table referenced by the relocation data.
      //
      Elf_Shdr *SymtabShdr = GetShdrByIndex(RelShdr->sh_link);
      UINT8 *Symtab = (UINT8*)mEhdr + SymtabShdr->sh_offset;

      //
      // Process all relocation entries for this section.
      //
      for (RelIdx = 0; RelIdx < RelShdr->sh_size; RelIdx += (UINT32) RelShdr->sh_entsize) {

        //
        // Set pointer to relocation entry
        //
        Elf_Rela *Rel = (Elf_Rela *)((UINT8*)mEhdr + RelShdr->sh_offset + RelIdx);

        //
        // Set pointer to symbol table entry associated with the relocation entry.
        //
        Elf_Sym  *Sym = (Elf_Sym *)(Symtab + ELF_R_SYM(Rel->r_info) * SymtabShdr->sh_entsize);

        Elf_Shdr *SymShdr;
        UINT8    *Targ;

        //
        // Check section header index found in symbol table and get the section
        // header location.  Valid indices are 0 .. e_shnum-1, so reject
        // st_shndx >= e_shnum (the old `>` check let e_shnum itself through).
        //
        if (Sym->st_shndx == SHN_UNDEF
            || Sym->st_shndx == SHN_ABS
            || Sym->st_shndx >= mEhdr->e_shnum) {
          Error (NULL, 0, 3000, "Invalid", "%s bad symbol definition.", mInImageName);
        }
        SymShdr = GetShdrByIndex(Sym->st_shndx);

        //
        // Convert the relocation data to a pointer into the coff file.
        //
        // Note:
        //   r_offset is the virtual address of the storage unit to be relocated.
        //   sh_addr is the virtual address for the base of the section.
        //
        //   r_offset in a memory address.
        //   Convert it to a pointer in the coff file.
        //
        Targ = mCoffFile + SecOffset + (Rel->r_offset - SecShdr->sh_addr);

        //
        // Determine how to handle each relocation type based on the machine type.
        //
        if (mEhdr->e_machine == EM_X86_64) {
          switch (ELF_R_TYPE(Rel->r_info)) {
          case R_X86_64_NONE:
            break;
          case R_X86_64_64:
            //
            // Absolute relocation.
            //
            VerboseMsg ("R_X86_64_64");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%016LX",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT64 *)Targ);
            *(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];
            VerboseMsg ("Relocation:  0x%016LX", *(UINT64*)Targ);
            break;
          case R_X86_64_32:
            //
            // 32-bit absolute (zero-extended) relocation.
            //
            VerboseMsg ("R_X86_64_32");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(UINT32 *)Targ = (UINT32)((UINT64)(*(UINT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);
            VerboseMsg ("Relocation:  0x%08X", *(UINT32*)Targ);
            break;
          case R_X86_64_32S:
            //
            // 32-bit absolute (sign-extended) relocation.
            //
            VerboseMsg ("R_X86_64_32S");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(INT32 *)Targ = (INT32)((INT64)(*(INT32 *)Targ) - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx]);
            VerboseMsg ("Relocation:  0x%08X", *(UINT32*)Targ);
            break;
          case R_X86_64_PC32:
            //
            // Relative relocation: Symbol - Ip + Addend
            //
            VerboseMsg ("R_X86_64_PC32");
            VerboseMsg ("Offset: 0x%08X, Addend: 0x%08X",
              (UINT32)(SecOffset + (Rel->r_offset - SecShdr->sh_addr)),
              *(UINT32 *)Targ);
            *(UINT32 *)Targ = (UINT32) (*(UINT32 *)Targ
              + (mCoffSectionsOffset[Sym->st_shndx] - SymShdr->sh_addr)
              - (SecOffset - SecShdr->sh_addr));
            VerboseMsg ("Relocation:  0x%08X", *(UINT32 *)Targ);
            break;
          default:
            Error (NULL, 0, 3000, "Invalid", "%s unsupported ELF EM_X86_64 relocation 0x%x.", mInImageName, (unsigned) ELF_R_TYPE(Rel->r_info));
          }
        } else if (mEhdr->e_machine == EM_AARCH64) {

          // AARCH64 GCC uses RELA relocation, so all relocations have to be fixed up.
          // As opposed to ARM32 using REL.

          switch (ELF_R_TYPE(Rel->r_info)) {

          case R_AARCH64_ADR_PREL_LO21:
            if  (Rel->r_addend != 0 ) { /* TODO */
              Error (NULL, 0, 3000, "Invalid", "AArch64: R_AARCH64_ADR_PREL_LO21 Need to fixup with addend!.");
            }
            break;

          case R_AARCH64_CONDBR19:
            if  (Rel->r_addend != 0 ) { /* TODO */
              Error (NULL, 0, 3000, "Invalid", "AArch64: R_AARCH64_CONDBR19 Need to fixup with addend!.");
            }
            break;

          case R_AARCH64_LD_PREL_LO19:
            if  (Rel->r_addend != 0 ) { /* TODO */
              Error (NULL, 0, 3000, "Invalid", "AArch64: R_AARCH64_LD_PREL_LO19 Need to fixup with addend!.");
            }
            break;

          case R_AARCH64_CALL26:
          case R_AARCH64_JUMP26:
            if  (Rel->r_addend != 0 ) {
              // Some references to static functions sometime start at the base of .text + addend.
              // It is safe to ignore these relocations because they patch a `BL` instructions that
              // contains an offset from the instruction itself and there is only a single .text section.
              // So we check if the symbol is a "section symbol"
              if (ELF64_ST_TYPE (Sym->st_info) == STT_SECTION) {
                break;
              }
              Error (NULL, 0, 3000, "Invalid", "AArch64: R_AARCH64_JUMP26 Need to fixup with addend!.");
            }
            break;

          case R_AARCH64_ADR_PREL_PG_HI21:
            // TODO : AArch64 'small' memory model.
            Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s unsupported ELF EM_AARCH64 relocation R_AARCH64_ADR_PREL_PG_HI21.", mInImageName);
            break;

          case R_AARCH64_ADD_ABS_LO12_NC:
            // TODO : AArch64 'small' memory model.
            Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s unsupported ELF EM_AARCH64 relocation R_AARCH64_ADD_ABS_LO12_NC.", mInImageName);
            break;

          // Absolute relocations.
          case R_AARCH64_ABS64:
            *(UINT64 *)Targ = *(UINT64 *)Targ - SymShdr->sh_addr + mCoffSectionsOffset[Sym->st_shndx];
            break;

          default:
            Error (NULL, 0, 3000, "Invalid", "WriteSections64(): %s unsupported ELF EM_AARCH64 relocation 0x%x.", mInImageName, (unsigned) ELF_R_TYPE(Rel->r_info));
          }
        } else {
          Error (NULL, 0, 3000, "Invalid", "Not a supported machine type");
        }
      }
    }
  }

  return TRUE;
}
/*
 * Process the DT_REL/DT_RELA/DT_JMPREL relocation table of a loaded shared
 * object in the run-time linker.
 *
 * object - object being relocated (loff = its load offset / base address)
 * rel    - dynamic tag selecting the table (DT_RELA, DT_REL, or DT_JMPREL)
 * relsz  - dynamic tag giving that table's size in bytes
 *
 * Returns the number of relocations whose symbol lookup failed (0 on
 * complete success).  Callers presumably treat non-zero as fatal — the
 * failing entries are skipped, not applied.
 */
int
_dl_md_reloc(elf_object_t *object, int rel, int relsz)
{
	long	i;
	long	numrel;		/* total entries in the table */
	long	relrel;		/* leading R_RELATIVE entries (DT_RELACOUNT) */
	int	fails = 0;	/* count of unresolved non-weak symbols */
	Elf_Addr loff;		/* object's load offset */
	Elf_Addr prev_value = 0;	/* cache: last resolved symbol value */
	const Elf_Sym *prev_sym = NULL;	/* cache: last resolved symbol entry */
	Elf_RelA *rels;
	struct load_list *llist;

	loff = object->obj_base;
	numrel = object->Dyn.info[relsz] / sizeof(Elf_RelA);
	/* DT_RELACOUNT is only meaningful for the DT_RELA table. */
	relrel = rel == DT_RELA ? object->relacount : 0;
	rels = (Elf_RelA *)(object->Dyn.info[rel]);

	if (rels == NULL)
		return(0);

	/* A RELACOUNT larger than the table itself means corrupt dynamic info. */
	if (relrel > numrel) {
		_dl_printf("relacount > numrel: %ld > %ld\n", relrel, numrel);
		_dl_exit(20);
	}

	/*
	 * unprotect some segments if we need it.
	 * (DT_TEXTREL objects have relocations in otherwise read-only
	 * mappings; make them writable for the duration.)
	 */
	if ((object->dyn.textrel == 1) && (rel == DT_REL || rel == DT_RELA)) {
		for (llist = object->load_list; llist != NULL; llist = llist->next) {
			if (!(llist->prot & PROT_WRITE))
				_dl_mprotect(llist->start, llist->size,
				    PROT_READ | PROT_WRITE);
		}
	}

	/*
	 * tight loop for leading RELATIVE relocs: no symbol lookup needed,
	 * each target is simply addend + load offset.
	 */
	for (i = 0; i < relrel; i++, rels++) {
		Elf_Addr *where;

#ifdef DEBUG
		if (ELF_R_TYPE(rels->r_info) != R_TYPE(RELATIVE)) {
			_dl_printf("RELACOUNT wrong\n");
			_dl_exit(20);
		}
#endif
		where = (Elf_Addr *)(rels->r_offset + loff);
		*where = rels->r_addend + loff;
	}

	/* General path for the remaining relocation entries. */
	for (; i < numrel; i++, rels++) {
		Elf_Addr *where, value, ooff, mask;
		Elf_Word type;
		const Elf_Sym *sym, *this;
		const char *symn;

		type = ELF_R_TYPE(rels->r_info);

		if (RELOC_ERROR(type)) {
			_dl_printf("relocation error %d idx %d\n", type, i);
			_dl_exit(20);
		}

		if (type == R_TYPE(NONE))
			continue;

		/* JMP_SLOT entries are handled lazily via the PLT unless we
		 * were explicitly asked to process the DT_JMPREL table. */
		if (type == R_TYPE(JUMP_SLOT) && rel != DT_JMPREL)
			continue;

		where = (Elf_Addr *)(rels->r_offset + loff);

		if (RELOC_USE_ADDEND(type))
			value = rels->r_addend;
		else
			value = 0;

		sym = NULL;
		symn = NULL;
		if (RELOC_RESOLVE_SYMBOL(type)) {
			sym = object->dyn.symtab;
			sym += ELF_R_SYM(rels->r_info);
			symn = object->dyn.strtab + sym->st_name;

			if (sym->st_shndx != SHN_UNDEF &&
			    ELF_ST_BIND(sym->st_info) == STB_LOCAL) {
				/* Defined local symbol: resolves within this
				 * object, just rebase by the load offset. */
				value += loff;
			} else if (sym == prev_sym) {
				/* Same symbol as the previous entry: reuse the
				 * cached lookup result. */
				value += prev_value;
			} else {
				this = NULL;
				ooff = _dl_find_symbol_bysym(object,
				    ELF_R_SYM(rels->r_info), &this,
				    SYM_SEARCH_ALL|SYM_WARNNOTFOUND|
				    ((type == R_TYPE(JUMP_SLOT))?
					SYM_PLT:SYM_NOTPLT),
				    sym, NULL);
				if (this == NULL) {
resolve_failed:
					/* Unresolved weak symbols are allowed
					 * (left as-is); anything else counts
					 * as a failure. */
					if (ELF_ST_BIND(sym->st_info) !=
					    STB_WEAK)
						fails++;
					continue;
				}
				prev_sym = sym;
				prev_value = (Elf_Addr)(ooff + this->st_value);
				value += prev_value;
			}
		}

		if (type == R_TYPE(JUMP_SLOT)) {
			/* PLT slot: architecture-specific patch routine. */
			_dl_reloc_plt(where, value);
			continue;
		}

		if (type == R_TYPE(COPY)) {
			/*
			 * COPY reloc: duplicate the symbol's data from the
			 * object that defines it (searched with
			 * SYM_SEARCH_OTHER, i.e. excluding ourselves) into
			 * our own storage at `where`.
			 */
			void *dstaddr = where;
			const void *srcaddr;
			const Elf_Sym *dstsym = sym, *srcsym = NULL;
			Elf_Addr soff;

			soff = _dl_find_symbol(symn, &srcsym,
			    SYM_SEARCH_OTHER|SYM_WARNNOTFOUND|SYM_NOTPLT,
			    dstsym, object, NULL);
			if (srcsym == NULL)
				goto resolve_failed;

			srcaddr = (void *)(soff + srcsym->st_value);
			_dl_bcopy(srcaddr, dstaddr, dstsym->st_size);
			continue;
		}

		if (RELOC_PC_RELATIVE(type))
			value -= (Elf_Addr)where;
		if (RELOC_BASE_RELATIVE(type))
			value += loff;

		/*
		 * Merge the computed value into the target word under the
		 * type's bitmask/shift (per-architecture RELOC_* tables),
		 * using a 32-bit store when the target field is <= 32 bits.
		 */
		mask = RELOC_VALUE_BITMASK(type);
		value >>= RELOC_VALUE_RIGHTSHIFT(type);
		value &= mask;

		if (RELOC_TARGET_SIZE(type) > 32) {
			*where &= ~mask;
			*where |= value;
		} else {
			Elf32_Addr *where32 = (Elf32_Addr *)where;

			*where32 &= ~mask;
			*where32 |= value;
		}
	}

	/* reprotect the unprotected segments */
	if ((object->dyn.textrel == 1) && (rel == DT_REL || rel == DT_RELA)) {
		for (llist = object->load_list; llist != NULL; llist = llist->next) {
			if (!(llist->prot & PROT_WRITE))
				_dl_mprotect(llist->start, llist->size,
				    llist->prot);
		}
	}

	return (fails);
}