Example #1
/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	if (obj->jmpslots_done)
		return 0;
	/* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */
	if (obj->pltrelsize != 0) {
		const Elf_Rel *rellim;
		const Elf_Rel *rel;

		rellim = (const Elf_Rel *)
			((char *)obj->pltrel + obj->pltrelsize);
		for (rel = obj->pltrel;  rel < rellim;  rel++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			assert(ELF_R_TYPE(rel->r_info) == R_IA_64_IPLTLSB);
			where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
			def = find_symdef(ELF_R_SYM(rel->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, rel);
		}
	} else {
		const Elf_Rela *relalim;
		const Elf_Rela *rela;

		relalim = (const Elf_Rela *)
			((char *)obj->pltrela + obj->pltrelasize);
		for (rela = obj->pltrela;  rela < relalim;  rela++) {
			Elf_Addr *where;
			const Elf_Sym *def;
			const Obj_Entry *defobj;

			where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return -1;
			reloc_jmpslot(where,
				      (Elf_Addr)(defobj->relocbase
						 + def->st_value),
				      defobj, obj, (Elf_Rel *)rela);
		}
	}
	obj->jmpslots_done = true;
	return 0;
}
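
The comment above notes that an object's PLT uses a single relocation format, either Elf_Rel or Elf_Rela. For reference, the two record layouts differ only in whether the addend is stored explicitly. A simplified 64-bit sketch is below; the *_sketch type names are invented for illustration, while the field names follow the ELF specification.

#include <stdint.h>

/* Elf_Rel: the addend is implicit, read from the word being relocated. */
typedef struct {
	uint64_t r_offset;	/* where in the object to apply the relocation */
	uint64_t r_info;	/* symbol-table index plus relocation type */
} Elf64_Rel_sketch;

/* Elf_Rela: the addend travels in the record itself. */
typedef struct {
	uint64_t r_offset;
	uint64_t r_info;
	int64_t  r_addend;
} Elf64_Rela_sketch;
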
Example #2
int
reloc_gnu_ifunc(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
    const Elf_Rela *relalim;
    const Elf_Rela *rela;

    if (!obj->gnu_ifunc)
	return (0);
    relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
    for (rela = obj->pltrela;  rela < relalim;  rela++) {
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_X86_64_JMP_SLOT:
	  where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
	  def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		SYMLOOK_IN_PLT | flags, NULL, lockstate);
	  if (def == NULL)
	      return (-1);
	  if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
	      continue;
	  lock_release(rtld_bind_lock, lockstate);
	  target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
	  wlock_acquire(rtld_bind_lock, lockstate);
	  reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
	  break;
	}
    }
    obj->gnu_ifunc = false;
    return (0);
}
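
For context on what reloc_gnu_ifunc() resolves: an STT_GNU_IFUNC symbol is backed by a resolver function, and rtld_resolve_ifunc() ends up calling that resolver to obtain the address written into the jump slot. A minimal, hypothetical library-side definition using GCC's ifunc attribute might look like the sketch below; the function names are made up, and the implementation choice is a placeholder.

/* Hypothetical GNU IFUNC definition in a shared library (GCC/ELF). */
static int add_portable(int a, int b) { return a + b; }
static int add_optimized(int a, int b) { return a + b; }	/* e.g. a SIMD variant */

/*
 * The resolver runs inside the dynamic linker, possibly before other
 * relocations are finished, so it should not depend on unresolved symbols.
 */
static int (*resolve_add(void))(int, int)
{
	/* Placeholder for a real CPU-feature check. */
	return 1 ? add_optimized : add_portable;
}

int add(int a, int b) __attribute__((ifunc("resolve_add")));
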
Example #3
Elf_Addr
_mips_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
        Elf_Addr *got = obj->pltgot;
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        Elf_Addr *where;
        Elf_Addr target;
        RtldLockState lockstate;

	rlock_acquire(rtld_bind_lock, &lockstate);
	if (sigsetjmp(lockstate.env, 0) != 0)
		lock_upgrade(rtld_bind_lock, &lockstate);

	where = &got[obj->local_gotno + reloff - obj->gotsym];
        def = find_symdef(reloff, obj, &defobj, SYMLOOK_IN_PLT, NULL,
           &lockstate);
        if (def == NULL)
		rtld_die();

        target = (Elf_Addr)(defobj->relocbase + def->st_value);
        dbg("bind now/fixup at %s sym # %jd in %s --> was=%p new=%p",
	    obj->path,
	    (intmax_t)reloff, defobj->strtab + def->st_name, 
	    (void *)*where, (void *)target);
	if (!ld_bind_not)
		*where = target;
	lock_release(rtld_bind_lock, &lockstate);
	return (Elf_Addr)target;
}
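
The slot arithmetic in _mips_rtld_bind() follows the MIPS GOT layout: local entries come first (DT_MIPS_LOCAL_GOTNO of them), followed by one global entry per dynamic symbol starting at index DT_MIPS_GOTSYM. A small worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values from an object's dynamic section. */
	unsigned long local_gotno = 5;	/* DT_MIPS_LOCAL_GOTNO */
	unsigned long gotsym = 12;	/* DT_MIPS_GOTSYM: first symbol with a GOT entry */
	unsigned long reloff = 17;	/* dynamic symbol index handed in by the PLT stub */

	/* Same computation as &got[obj->local_gotno + reloff - obj->gotsym]. */
	unsigned long slot = local_gotno + reloff - gotsym;
	printf("symbol %lu binds through GOT slot %lu\n", reloff, slot);	/* slot 10 */
	return 0;
}
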
Example #4
/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	const Elf_Sym *def;
	Elf_Addr *where;
	Elf_Addr target;
	
	rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
	for (rel = obj->pltrel; rel < rellim; rel++) {
		assert(ELF_R_TYPE(rel->r_info) == R_ARM_JUMP_SLOT);
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		    true, NULL, lockstate);
		if (def == NULL) {
			dbg("reloc_jmpslots: sym not found");
			return (-1);
		}
		
		target = (Elf_Addr)(defobj->relocbase + def->st_value);		
		reloc_jmpslot(where, target, defobj, obj,
		    (const Elf_Rel *) rel);
	}
	
	obj->jmpslots_done = true;
	
	return (0);
}
Example #5
/* Process the PLT relocations. */
int
reloc_plt(Obj_Entry *obj, bool bind_now)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	/* Process the PLT relocations. */
	rellim = (const Elf_Rel *) ((caddr_t) obj->pltrel + obj->pltrelsize);
	if (bind_now) {
	    /* Fully resolve procedure addresses now */
	    for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where = (Elf_Addr *)
		  (obj->relocbase + rel->r_offset);
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT);

		def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true);
		if (def == NULL)
		    return -1;

		*where = (Elf_Addr) (defobj->relocbase + def->st_value);
	    }
	} else {	/* Just relocate the GOT slots pointing into the PLT */
	    for (rel = obj->pltrel;  rel < rellim;  rel++) {
		Elf_Addr *where = (Elf_Addr *)
		  (obj->relocbase + rel->r_offset);
		*where += (Elf_Addr) obj->relocbase;
	    }
	}
    return 0;
}
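
In the lazy branch of reloc_plt() above, each GOT slot already holds the link-time address of its PLT stub, so the loader only needs to add the object's load bias. A trivial worked example with hypothetical addresses:

#include <stdio.h>

int main(void)
{
	unsigned long relocbase = 0x20000000UL;	/* hypothetical load address of the object */
	unsigned long got_slot = 0x1566UL;	/* link-time PLT stub address stored in the slot */

	got_slot += relocbase;			/* *where += (Elf_Addr)obj->relocbase; */
	printf("slot now points at the relocated stub: %#lx\n", got_slot);
	return 0;
}
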
Example #6
/* Relocate the jump slots in an object. */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
    const Elf_Rel *rellim;
    const Elf_Rel *rel;

    if (obj->jmpslots_done)
	return 0;
    rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize);
    for (rel = obj->pltrel;  rel < rellim;  rel++) {
	Elf_Addr *where, target;
	const Elf_Sym *def;
	const Obj_Entry *defobj;

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_386_JMP_SLOT:
	  where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	  def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		SYMLOOK_IN_PLT | flags, NULL, lockstate);
	  if (def == NULL)
	      return (-1);
	  if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
	      obj->gnu_ifunc = true;
	      continue;
	  }
	  target = (Elf_Addr)(defobj->relocbase + def->st_value);
	  reloc_jmpslot(where, target, defobj, obj, rel);
	  break;

	case R_386_IRELATIVE:
	  break;

	default:
	  _rtld_error("Unknown relocation type %x in PLT",
	    ELF_R_TYPE(rel->r_info));
	  return (-1);
	}
    }

    obj->jmpslots_done = true;
    return 0;
}
Example #7
Elf_Addr
_mips_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
{
        Elf_Addr *got = obj->pltgot;
        const Elf_Sym *def;
        const Obj_Entry *defobj;
        Elf_Addr target;

        def = find_symdef(reloff, obj, &defobj, SYMLOOK_IN_PLT, NULL,
	    NULL);
        if (def == NULL) {
		_rtld_error("bind failed: symbol not found");
		rtld_die();
        }

        target = (Elf_Addr)(defobj->relocbase + def->st_value);
        dbg("bind now/fixup at %s sym # %jd in %s --> was=%p new=%p",
	    obj->path,
	    (intmax_t)reloff, defobj->strtab + def->st_name, 
	    (void *)got[obj->local_gotno + reloff - obj->gotsym],
	    (void *)target);
        got[obj->local_gotno + reloff - obj->gotsym] = target;
	return (Elf_Addr)target;
}
Example #8
/*
 * LD_BIND_NOW was set - force relocation for all jump slots
 */
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
	const Obj_Entry *defobj;
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	const Elf_Sym *def;

	if (obj->jmpslots_done)
		return (0);

	relalim = (const Elf_Rela *)((const char *)obj->pltrela +
	    obj->pltrelasize);
	for (rela = obj->pltrela; rela < relalim; rela++) {
		Elf_Addr *where, target;

		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		switch(ELF_R_TYPE(rela->r_info)) {
		case R_AARCH64_JUMP_SLOT:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
			if (def == NULL)
				return (-1);
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				obj->gnu_ifunc = true;
				continue;
			}
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			reloc_jmpslot(where, target, defobj, obj,
			    (const Elf_Rel *)rela);
			break;
		}
	}
	obj->jmpslots_done = true;

	return (0);
}
Example #9
static void
reloc_tlsdesc(const Obj_Entry *obj, const Elf_Rela *rela, Elf_Addr *where,
    int flags, RtldLockState *lockstate)
{
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr offs;

	offs = 0;
	if (ELF_R_SYM(rela->r_info) != 0) {
		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags,
			    NULL, lockstate);
		if (def == NULL)
			rtld_die();
		offs = def->st_value;
		obj = defobj;
		if (def->st_shndx == SHN_UNDEF) {
			/* Weak undefined thread variable */
			where[0] = (Elf_Addr)_rtld_tlsdesc_undef;
			where[1] = rela->r_addend;
			return;
		}
	}
	offs += rela->r_addend;

	if (obj->tlsoffset != 0) {
		/* Variable is in the initially allocated TLS segment */
		where[0] = (Elf_Addr)_rtld_tlsdesc_static;
		where[1] = obj->tlsoffset + offs;
	} else {
		/* TLS offset is unknown at load time; use dynamic resolving */
		where[0] = (Elf_Addr)_rtld_tlsdesc_dynamic;
		where[1] = reloc_tlsdesc_alloc(obj->tlsindex, offs);
	}
}
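
reloc_tlsdesc() above fills in a two-word TLS descriptor: word 0 is the resolver that compiler-generated code will call through, word 1 is the argument handed to that resolver (a static offset or a pointer to dynamically resolved data). A simplified view of that layout, with an invented type name and assuming the conventional TLSDESC calling interface:

#include <stdint.h>

struct tlsdesc_sketch {
	uintptr_t (*resolver)(struct tlsdesc_sketch *);	/* where[0]: _rtld_tlsdesc_static/_dynamic/_undef */
	uintptr_t argument;				/* where[1]: static offset or dynamic-resolution data */
};
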
Example #10
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rel;
	const Elf_Rel *rellim;
	Elf_Addr *got = obj->pltgot;
	const Elf_Sym *sym, *def;
	const Obj_Entry *defobj;
	Elf_Word i;
#ifdef SUPPORT_OLD_BROKEN_LD
	int broken;
#endif

	/* The relocation for the dynamic loader has already been done. */
	if (obj == obj_rtld)
		return (0);

	if ((flags & SYMLOOK_IFUNC) != 0)
		/* XXX not implemented */
		return (0);

#ifdef SUPPORT_OLD_BROKEN_LD
	broken = 0;
	sym = obj->symtab;
	for (i = 1; i < 12; i++)
		if (sym[i].st_info == ELF_ST_INFO(STB_LOCAL, STT_NOTYPE))
			broken = 1;
	dbg("%s: broken=%d", obj->path, broken);
#endif

	i = (got[1] & GOT1_MASK) ? 2 : 1;

	/* Relocate the local GOT entries */
	got += i;
	dbg("got:%p for %d entries adding %p",
	    got, obj->local_gotno, obj->relocbase);
	for (; i < obj->local_gotno; i++) {
		*got += (Elf_Addr)obj->relocbase;
		got++;
	}
	sym = obj->symtab + obj->gotsym;

	dbg("got:%p for %d entries",
	    got, obj->symtabno);
	/* Now do the global GOT entries */
	for (i = obj->gotsym; i < obj->symtabno; i++) {
		dbg(" doing got %d sym %p (%s, %lx)", i - obj->gotsym, sym,
		    sym->st_name + obj->strtab, (u_long) *got);

#ifdef SUPPORT_OLD_BROKEN_LD
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    broken && sym->st_shndx == SHN_UNDEF) {
			/*
			 * XXX DANGER WILL ROBINSON!
			 * You might think this is stupid, as it intentionally
			 * defeats lazy binding -- and you'd be right.
			 * Unfortunately, for lazy binding to work right, we
			 * need a way to force the GOT slots used for
			 * function pointers to be resolved immediately.  This
			 * is supposed to be done automatically by the linker,
			 * by not outputting a PLT slot and setting st_value
			 * to 0 if there are non-PLT references, but older
			 * versions of GNU ld do not do this.
			 */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL)
				return -1;
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
		} else
#endif
		if (ELF_ST_TYPE(sym->st_info) == STT_FUNC &&
		    sym->st_value != 0 && sym->st_shndx == SHN_UNDEF) {
			/*
			 * If there are non-PLT references to the function,
			 * st_value should be 0, forcing us to resolve the
			 * address immediately.
			 *
			 * XXX DANGER WILL ROBINSON!
			 * The linker is not outputting PLT slots for calls to
			 * functions that are defined in the same shared
			 * library.  This is a bug, because it can screw up
			 * link ordering rules if the symbol is defined in
			 * more than one module.  For now, if there is a
			 * definition, we fail the test above and force a full
			 * symbol lookup.  This means that all intra-module
			 * calls are bound immediately.  - mycroft, 2003/09/24
			 */
			*got = sym->st_value + (Elf_Addr)obj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning2, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
			}

		} else if (sym->st_info == ELF_ST_INFO(STB_GLOBAL, STT_SECTION)) {
			/* Symbols with index SHN_ABS are not relocated. */
			if (sym->st_shndx != SHN_ABS) {
				*got = sym->st_value +
				    (Elf_Addr)obj->relocbase;
				if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
					dbg("Warning3, i:%d maps to relocbase address:%p",
					    i, obj->relocbase);
				}
			}
		} else {
			/* TODO: add cache here */
			def = find_symdef(i, obj, &defobj, flags, NULL,
			    lockstate);
			if (def == NULL) {
				dbg("Warning4, can't find symbol %d", i);
				return -1;
			}
			*got = def->st_value + (Elf_Addr)defobj->relocbase;
			if ((Elf_Addr)(*got) == (Elf_Addr)obj->relocbase) {
				dbg("Warning4, i:%d maps to relocbase address:%p",
				    i, obj->relocbase);
				dbg("via first obj symbol %s",
				    obj->strtab + obj->symtab[i].st_name);
				dbg("found in obj %p:%s",
				    defobj, defobj->path);
			}
		}

		dbg("  --> now %lx", (u_long) *got);
		++sym;
		++got;
	}

	got = obj->pltgot;
	rellim = (const Elf_Rel *)((caddr_t)obj->rel + obj->relsize);
	for (rel = obj->rel; rel < rellim; rel++) {
		Elf_Word	r_symndx, r_type;
		void		*where;

		where = obj->relocbase + rel->r_offset;
		r_symndx = ELF_R_SYM(rel->r_info);
		r_type = ELF_R_TYPE(rel->r_info);

		switch (r_type & 0xff) {
		case R_TYPE(NONE):
			break;

		case R_TYPE(REL32): {
			/* 32-bit PC-relative reference */
			const size_t rlen =
			    ELF_R_NXTTYPE_64_P(r_type)
				? sizeof(Elf_Sxword)
				: sizeof(Elf_Sword);
			Elf_Sxword old = load_ptr(where, rlen);
			Elf_Sxword val = old;

			def = obj->symtab + r_symndx;

			if (r_symndx >= obj->gotsym) {
				val += got[obj->local_gotno + r_symndx - obj->gotsym];
				dbg("REL32/G(%p) %p --> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name,
				    obj->path);
			} else {
				/*
				 * XXX: ABI DIFFERENCE!
				 *
				 * Old NetBSD binutils would generate shared
				 * libs with section-relative relocations being
				 * already adjusted for the start address of
				 * the section.
				 *
				 * New binutils, OTOH, generate shared libs
				 * with the same relocations being based at
				 * zero, so we need to add in the start address
				 * of the section.
				 *
				 * --rkb, Oct 6, 2001
				 */

				if (def->st_info ==
				    ELF_ST_INFO(STB_LOCAL, STT_SECTION)
#ifdef SUPPORT_OLD_BROKEN_LD
				    && !broken
#endif
				    )
					val += (Elf_Addr)def->st_value;

				val += (Elf_Addr)obj->relocbase;

				dbg("REL32/L(%p) %p -> %p (%s) in %s",
				    where, (void *)old, (void *)val,
				    obj->strtab + def->st_name, obj->path);
			}
			store_ptr(where, val, rlen);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPMOD64):
#else
		case R_TYPE(TLS_DTPMOD32): 
#endif
		{

			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

        		def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
	    			lockstate);
			if (def == NULL)
				return -1;

			val += (Elf_Addr)defobj->tlsindex;

			store_ptr(where, val, rlen);
			dbg("DTPMOD %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void *)old, (void*)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_DTPREL64):
#else
		case R_TYPE(TLS_DTPREL32):
#endif
		{
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

        		def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
	    			lockstate);
			if (def == NULL)
				return -1;

			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)def->st_value - TLS_DTP_OFFSET;
			store_ptr(where, val, rlen);

			dbg("DTPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

#ifdef __mips_n64
		case R_TYPE(TLS_TPREL64):
#else
		case R_TYPE(TLS_TPREL32):
#endif
		{
			const size_t rlen = sizeof(Elf_Addr);
			Elf_Addr old = load_ptr(where, rlen);
			Elf_Addr val = old;

        		def = find_symdef(r_symndx, obj, &defobj, flags, NULL,
	    			lockstate);

			if (def == NULL)
				return -1;

			if (!defobj->tls_done && !allocate_tls_offset(obj))
				return -1;

			val += (Elf_Addr)(def->st_value + defobj->tlsoffset
			    - TLS_TP_OFFSET - TLS_TCB_SIZE);
			store_ptr(where, val, rlen);

			dbg("TPREL %s in %s %p --> %p in %s",
			    obj->strtab + obj->symtab[r_symndx].st_name,
			    obj->path, (void*)old, (void *)val, defobj->path);
			break;
		}

		default:
			dbg("sym = %lu, type = %lu, offset = %p, "
			    "contents = %p, symbol = %s",
			    (u_long)r_symndx, (u_long)ELF_R_TYPE(rel->r_info),
			    (void *)rel->r_offset,
			    (void *)load_ptr(where, sizeof(Elf_Sword)),
			    obj->strtab + obj->symtab[r_symndx].st_name);
			_rtld_error("%s: Unsupported relocation type %ld "
			    "in non-PLT relocations",
			    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
			return -1;
		}
	}

	return 0;
}
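
reloc_non_plt() above goes through load_ptr()/store_ptr() because REL32 targets may be unaligned and either 4 or 8 bytes wide. The helpers are sketched below as an assumption about their behavior (byte-wise copy plus sign extension of the 32-bit case), not as the rtld implementation itself.

#include <stdint.h>
#include <string.h>

static int64_t load_ptr_sketch(const void *where, size_t len)
{
	if (len == sizeof(int32_t)) {
		int32_t v32;

		memcpy(&v32, where, sizeof(v32));	/* tolerates unaligned targets */
		return v32;				/* sign-extends to 64 bits */
	} else {
		int64_t v64;

		memcpy(&v64, where, sizeof(v64));
		return v64;
	}
}

static void store_ptr_sketch(void *where, int64_t val, size_t len)
{
	if (len == sizeof(int32_t)) {
		int32_t v32 = (int32_t)val;

		memcpy(where, &v32, sizeof(v32));
	} else {
		memcpy(where, &val, sizeof(val));
	}
}
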
Example #11
/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rela *relalim;
	const Elf_Rela *rela;
	SymCache *cache;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr *where, symval;
	Elf32_Addr *where32;
	int r;

	r = -1;
	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */
	} else
		cache = NULL;

	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
	for (rela = obj->rela;  rela < relalim;  rela++) {
		/*
		 * First, resolve symbol for relocations which
		 * reference symbols.
		 */
		switch (ELF_R_TYPE(rela->r_info)) {
		case R_X86_64_64:
		case R_X86_64_PC32:
		case R_X86_64_GLOB_DAT:
		case R_X86_64_TPOFF64:
		case R_X86_64_TPOFF32:
		case R_X86_64_DTPMOD64:
		case R_X86_64_DTPOFF64:
		case R_X86_64_DTPOFF32:
			def = find_symdef(ELF_R_SYM(rela->r_info), obj,
			    &defobj, flags, cache, lockstate);
			if (def == NULL)
				goto done;
			/*
			 * If symbol is IFUNC, only perform relocation
			 * when caller allowed it by passing
			 * SYMLOOK_IFUNC flag.  Skip the relocations
			 * otherwise.
			 *
			 * Also error out in case IFUNC relocations
			 * are specified for TLS, which cannot be
			 * usefully interpreted.
			 */
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rela->r_info)) {
				case R_X86_64_64:
				case R_X86_64_PC32:
				case R_X86_64_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				case R_X86_64_TPOFF64:
				case R_X86_64_TPOFF32:
				case R_X86_64_DTPMOD64:
				case R_X86_64_DTPOFF64:
				case R_X86_64_DTPOFF32:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					goto done;
				}
			} else {
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
			break;
		}
		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
		where32 = (Elf32_Addr *)where;

		switch (ELF_R_TYPE(rela->r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*where = symval + rela->r_addend;
			break;
		case R_X86_64_PC32:
			/*
			 * I don't think the dynamic linker should
			 * ever see this type of relocation.  But the
			 * binutils-2.6 tools sometimes generate it.
			 */
			*where32 = (Elf32_Addr)(unsigned long)(symval +
		            rela->r_addend - (Elf_Addr)where);
			break;
		/* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
		case R_X86_64_COPY:
			/*
			 * These are deferred until all other relocations have
			 * been done.  All we do here is make sure that the COPY
			 * relocation is not in a shared library.  They are
			 * allowed only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_X86_64_COPY "
				    "relocation in shared library", obj->path);
				goto done;
			}
			break;
		case R_X86_64_GLOB_DAT:
			*where = symval;
			break;
		case R_X86_64_TPOFF64:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block. This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules. If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			*where = (Elf_Addr)(def->st_value - defobj->tlsoffset +
			    rela->r_addend);
			break;
		case R_X86_64_TPOFF32:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block. This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules. If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			*where32 = (Elf32_Addr)(def->st_value -
			    defobj->tlsoffset + rela->r_addend);
			break;
		case R_X86_64_DTPMOD64:
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_X86_64_DTPOFF64:
			*where += (Elf_Addr)(def->st_value + rela->r_addend);
			break;
		case R_X86_64_DTPOFF32:
			*where32 += (Elf32_Addr)(def->st_value +
			    rela->r_addend);
			break;
		case R_X86_64_RELATIVE:
			*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
			break;
		/*
		 * missing:
		 * R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16,
		 * R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8
		 */
		default:
			_rtld_error("%s: Unsupported relocation type %u"
			    " in non-PLT relocations\n", obj->path,
			    (unsigned int)ELF_R_TYPE(rela->r_info));
			goto done;
		}
	}
	r = 0;
done:
	free(cache);
	return (r);
}
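
The R_X86_64_TPOFF64/TPOFF32 cases above store a thread-pointer-relative offset; on x86-64 the static TLS block sits below the thread pointer, and tlsoffset records the distance from the thread pointer back to the start of this object's block, so the stored value usually comes out negative. A worked example with invented numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical static TLS assignment for one module. */
	long st_value = 0x10;	/* variable's offset inside the module's TLS block */
	long tlsoffset = 0x80;	/* distance from the thread pointer down to the block */
	long r_addend = 0;

	/* Same computation as def->st_value - defobj->tlsoffset + rela->r_addend. */
	long tpoff = st_value - tlsoffset + r_addend;
	printf("value stored by the TPOFF relocation: %ld\n", tpoff);	/* -112 */
	return 0;
}
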
Example #12
/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
    RtldLockState *lockstate)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;
	SymCache *cache;
	const Elf_Sym *def;
	const Obj_Entry *defobj;
	Elf_Addr *where, symval, add;
	int r;

	r = -1;
	/*
	 * The dynamic loader may be called from a thread, we have
	 * limited amounts of stack available so we cannot use alloca().
	 */
	if (obj != obj_rtld) {
		cache = calloc(obj->dynsymcount, sizeof(SymCache));
		/* No need to check for NULL here */
	} else
		cache = NULL;

	rellim = (const Elf_Rel *)((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel;  rel < rellim;  rel++) {
		switch (ELF_R_TYPE(rel->r_info)) {
		case R_386_32:
		case R_386_PC32:
		case R_386_GLOB_DAT:
		case R_386_TLS_TPOFF:
		case R_386_TLS_TPOFF32:
		case R_386_TLS_DTPMOD32:
		case R_386_TLS_DTPOFF32:
			def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
			    flags, cache, lockstate);
			if (def == NULL)
				goto done;
			if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
				switch (ELF_R_TYPE(rel->r_info)) {
				case R_386_32:
				case R_386_PC32:
				case R_386_GLOB_DAT:
					if ((flags & SYMLOOK_IFUNC) == 0) {
						obj->non_plt_gnu_ifunc = true;
						continue;
					}
					symval = (Elf_Addr)rtld_resolve_ifunc(
					    defobj, def);
					break;
				case R_386_TLS_TPOFF:
				case R_386_TLS_TPOFF32:
				case R_386_TLS_DTPMOD32:
				case R_386_TLS_DTPOFF32:
					_rtld_error("%s: IFUNC for TLS reloc",
					    obj->path);
					goto done;
				}
			} else {
				if ((flags & SYMLOOK_IFUNC) != 0)
					continue;
				symval = (Elf_Addr)defobj->relocbase +
				    def->st_value;
			}
			break;
		default:
			if ((flags & SYMLOOK_IFUNC) != 0)
				continue;
			break;
		}
		where = (Elf_Addr *)(obj->relocbase + rel->r_offset);

		switch (ELF_R_TYPE(rel->r_info)) {
		case R_386_NONE:
			break;
		case R_386_32:
			*where += symval;
			break;
		case R_386_PC32:
			/*
			 * I don't think the dynamic linker should ever
			 * see this type of relocation.  But the
			 * binutils-2.6 tools sometimes generate it.
			 */
			*where += symval - (Elf_Addr)where;
			break;
		case R_386_COPY:
			/*
			 * These are deferred until all other
			 * relocations have been done.  All we do here
			 * is make sure that the COPY relocation is
			 * not in a shared library.  They are allowed
			 * only in executable files.
			 */
			if (!obj->mainprog) {
				_rtld_error("%s: Unexpected R_386_COPY "
				    "relocation in shared library", obj->path);
				goto done;
			}
			break;
		case R_386_GLOB_DAT:
			*where = symval;
			break;
		case R_386_RELATIVE:
			*where += (Elf_Addr)obj->relocbase;
			break;
		case R_386_TLS_TPOFF:
		case R_386_TLS_TPOFF32:
			/*
			 * We lazily allocate offsets for static TLS
			 * as we see the first relocation that
			 * references the TLS block. This allows us to
			 * support (small amounts of) static TLS in
			 * dynamically loaded modules. If we run out
			 * of space, we generate an error.
			 */
			if (!defobj->tls_done) {
				if (!allocate_tls_offset((Obj_Entry*) defobj)) {
					_rtld_error("%s: No space available "
					    "for static Thread Local Storage",
					    obj->path);
					goto done;
				}
			}
			add = (Elf_Addr)(def->st_value - defobj->tlsoffset);
			if (ELF_R_TYPE(rel->r_info) == R_386_TLS_TPOFF)
				*where += add;
			else
				*where -= add;
			break;
		case R_386_TLS_DTPMOD32:
			*where += (Elf_Addr)defobj->tlsindex;
			break;
		case R_386_TLS_DTPOFF32:
			*where += (Elf_Addr) def->st_value;
			break;
		default:
			_rtld_error("%s: Unsupported relocation type %d"
			    " in non-PLT relocations\n", obj->path,
			    ELF_R_TYPE(rel->r_info));
			goto done;
		}
	}
	r = 0;
done:
	free(cache);
	return (r);
}
Example #13
/* Relocate a non-PLT object with addend. */
static int
reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
    SymCache *cache, int flags, RtldLockState *lockstate)
{
	struct fptr **fptrs;
	Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);

	switch (ELF_R_TYPE(rela->r_info)) {
	case R_IA_64_REL64LSB:
		/*
		 * We handle rtld's relocations in rtld_start.S
		 */
		if (obj != obj_rtld)
			store64(where,
				load64(where) + (Elf_Addr) obj->relocbase);
		break;

	case R_IA_64_DIR64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		Elf_Addr target;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		target = (def->st_shndx != SHN_UNDEF)
		    ? (Elf_Addr)(defobj->relocbase + def->st_value) : 0;
		store64(where, target + rela->r_addend);
		break;
	}

	case R_IA_64_FPTR64LSB: {
		/*
		 * We have to make sure that all @fptr references to
		 * the same function are identical so that code can
		 * compare function pointers.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr = 0;
		Elf_Addr target, gp;
		int sym_index;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    SYMLOOK_IN_PLT | flags, cache, lockstate);
		if (def == NULL) {
			/*
			 * XXX r_debug_state is problematic and find_symdef()
			 * returns NULL for it. This probably has something to
			 * do with symbol versioning (r_debug_state is in the
			 * symbol map). If we return -1 in that case we abort
			 * relocating rtld, which typically is fatal. So, for
			 * now just skip the symbol when we're relocating
			 * rtld. We don't care about r_debug_state unless we
			 * are being debugged.
			 */
			if (obj != obj_rtld)
				return -1;
			break;
		}

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;

			/* rtld is allowed to reference itself only */
			assert(!obj->rtld || obj == defobj);
			fptrs = defobj->priv;
			if (fptrs == NULL)
				fptrs = alloc_fptrs((Obj_Entry *) defobj, 
				    obj->rtld);

			sym_index = def - defobj->symtab;

			/*
			 * Find the @fptr, using fptrs as a helper.
			 */
			if (fptrs)
				fptr = fptrs[sym_index];
			if (!fptr) {
				fptr = alloc_fptr(target, gp);
				if (fptrs)
					fptrs[sym_index] = fptr;
			}
		} else
			fptr = NULL;

		store64(where, (Elf_Addr)fptr);
		break;
	}

	case R_IA_64_IPLTLSB: {
		/*
		 * Relocation typically used to populate C++ virtual function
		 * tables. It creates a 128-bit function descriptor at the
		 * specified memory address.
		 */
		const Elf_Sym *def;
		const Obj_Entry *defobj;
		struct fptr *fptr;
		Elf_Addr target, gp;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		if (def->st_shndx != SHN_UNDEF) {
			target = (Elf_Addr)(defobj->relocbase + def->st_value);
			gp = (Elf_Addr)defobj->pltgot;
		} else {
			target = 0;
			gp = 0;
		}

		fptr = (void*)where;
		store64(&fptr->target, target);
		store64(&fptr->gp, gp);
		break;
	}

	case R_IA_64_DTPMOD64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, defobj->tlsindex);
		break;
	}

	case R_IA_64_DTPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		store64(where, def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_TPREL64LSB: {
		const Elf_Sym *def;
		const Obj_Entry *defobj;

		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
		    flags, cache, lockstate);
		if (def == NULL)
			return -1;

		/*
		 * We lazily allocate offsets for static TLS as we
		 * see the first relocation that references the
		 * TLS block. This allows us to support (small
		 * amounts of) static TLS in dynamically loaded
		 * modules. If we run out of space, we generate an
		 * error.
		 */
		if (!defobj->tls_done) {
			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
				_rtld_error("%s: No space available for static "
				    "Thread Local Storage", obj->path);
				return -1;
			}
		}

		store64(where, defobj->tlsoffset + def->st_value + rela->r_addend);
		break;
	}

	case R_IA_64_NONE:
		break;

	default:
		_rtld_error("%s: Unsupported relocation type %u"
			    " in non-PLT relocations\n", obj->path,
			    (unsigned int)ELF_R_TYPE(rela->r_info));
		return -1;
	}

	return(0);
}
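
The R_IA_64_FPTR64LSB and R_IA_64_IPLTLSB cases above both build IA-64 function descriptors: a code address paired with the defining object's gp value, 16 bytes in total. The sketch below mirrors the two fields the code stores through struct fptr; the real rtld definition may carry extra bookkeeping.

#include <stdint.h>

struct fptr_sketch {
	uint64_t target;	/* entry point of the function */
	uint64_t gp;		/* global pointer of the defining object */
};
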
Example #14
static int
reloc_nonplt_object(Obj_Entry *obj, const Elf_Rel *rel, SymCache *cache,
    RtldLockState *lockstate)
{
	Elf_Addr        *where;
	const Elf_Sym   *def;
	const Obj_Entry *defobj;
	Elf_Addr         tmp;
	unsigned long	 symnum;

	where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
	symnum = ELF_R_SYM(rel->r_info);

	switch (ELF_R_TYPE(rel->r_info)) {
	case R_ARM_NONE:
		break;
		
#if 1 /* XXX should not occur */
	case R_ARM_PC24: {	/* word32 S - P + A */
		Elf32_Sword addend;
		
		/*
		 * Extract addend and sign-extend if needed.
		 */
		addend = *where;
		if (addend & 0x00800000)
			addend |= 0xff000000;
		
		def = find_symdef(symnum, obj, &defobj, false, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		tmp = (Elf_Addr)obj->relocbase + def->st_value
		    - (Elf_Addr)where + (addend << 2);
		if ((tmp & 0xfe000000) != 0xfe000000 &&
		    (tmp & 0xfe000000) != 0) {
			_rtld_error(
			    "%s: R_ARM_PC24 relocation @ %p to %s failed "
			    "(displacement %ld (%#lx) out of range)",
			    obj->path, where,
			    obj->strtab + obj->symtab[symnum].st_name,
			    (long) tmp, (long) tmp);
			return -1;
		}
		tmp >>= 2;
		*where = (*where & 0xff000000) | (tmp & 0x00ffffff);
		dbg("PC24 %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)*where, where, defobj->path);
		break;
	}
#endif

	case R_ARM_ABS32:	/* word32 B + S + A */
	case R_ARM_GLOB_DAT:	/* word32 B + S */
		def = find_symdef(symnum, obj, &defobj, false, cache,
		    lockstate);
		if (def == NULL)
			return -1;
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)defobj->relocbase +
			    def->st_value;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)defobj->relocbase +
			    def->st_value;
			store_ptr(where, tmp);
		}
		dbg("ABS32/GLOB_DAT %s in %s --> %p @ %p in %s",
		    obj->strtab + obj->symtab[symnum].st_name,
		    obj->path, (void *)tmp, where, defobj->path);
		break;

	case R_ARM_RELATIVE:	/* word32 B + A */
		if (__predict_true(RELOC_ALIGNED_P(where))) {
			tmp = *where + (Elf_Addr)obj->relocbase;
			*where = tmp;
		} else {
			tmp = load_ptr(where) +
			    (Elf_Addr)obj->relocbase;
			store_ptr(where, tmp);
		}
		dbg("RELATIVE in %s --> %p", obj->path,
		    (void *)tmp);
		break;

	case R_ARM_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the
		 * COPY relocation is not in a shared library.  They
		 * are allowed only in executable files.
		 */
		if (!obj->mainprog) {
			_rtld_error(
			    "%s: Unexpected R_COPY relocation in shared library",
			    obj->path);
			return -1;
		}
		dbg("COPY (avoid in main)");
		break;

	default:
		dbg("sym = %lu, type = %lu, offset = %p, "
		    "contents = %p, symbol = %s",
		    symnum, (u_long)ELF_R_TYPE(rel->r_info),
		    (void *)rel->r_offset, (void *)load_ptr(where),
		    obj->strtab + obj->symtab[symnum].st_name);
		_rtld_error("%s: Unsupported relocation type %ld "
		    "in non-PLT relocations\n",
		    obj->path, (u_long) ELF_R_TYPE(rel->r_info));
		return -1;
	}
	return 0;
}
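
The R_ARM_PC24 case above extracts a signed 24-bit word offset from the branch instruction and sign-extends it before scaling it to a byte displacement. A small standalone illustration of that addend handling, using a hypothetical instruction word and masking the field off explicitly:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t insn = 0xebfffffe;		/* hypothetical BL with offset field 0xfffffe */
	int32_t addend = insn & 0x00ffffff;	/* the low 24 bits hold a signed word offset */

	if (addend & 0x00800000)
		addend |= 0xff000000;		/* propagate the sign bit (bit 23) */
	printf("byte displacement contribution: %d\n", addend * 4);	/* -8 */
	return 0;
}
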
Example #15
/* Process the non-PLT relocations. */
int
reloc_non_plt(Obj_Entry *obj)
{
	const Elf_Rel *rellim;
	const Elf_Rel *rel;

	rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize);
	for (rel = obj->rel;  rel < rellim;  rel++) {
	    Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset);

	    switch (ELF_R_TYPE(rel->r_info)) {

	    case R_386_NONE:
		break;

	    case R_386_32:
		{
		    const Elf_Sym *def;
		    const Obj_Entry *defobj;

		    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		      false);
		    if (def == NULL)
			return -1;

		    *where += (Elf_Addr) (defobj->relocbase + def->st_value);
		}
		break;

	    case R_386_PC32:
		/*
		 * I don't think the dynamic linker should ever see this
		 * type of relocation.  But the binutils-2.6 tools sometimes
		 * generate it.
		 */
		{
		    const Elf_Sym *def;
		    const Obj_Entry *defobj;

		    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		      false);
		    if (def == NULL)
			return -1;

		    *where +=
		      (Elf_Addr) (defobj->relocbase + def->st_value) -
		      (Elf_Addr) where;
		}
		break;

	    case R_386_COPY:
		/*
		 * These are deferred until all other relocations have
		 * been done.  All we do here is make sure that the COPY
		 * relocation is not in a shared library.  They are allowed
		 * only in executable files.
		 */
		if (!obj->mainprog) {
		    _rtld_error("%s: Unexpected R_386_COPY relocation"
		      " in shared library", obj->path);
		    return -1;
		}
		break;

	    case R_386_GLOB_DAT:
		{
		    const Elf_Sym *def;
		    const Obj_Entry *defobj;

		    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj,
		      false);
		    if (def == NULL)
			return -1;

		    *where = (Elf_Addr) (defobj->relocbase + def->st_value);
		}
		break;

	    case R_386_RELATIVE:
		*where += (Elf_Addr) obj->relocbase;
		break;

	    default:
		_rtld_error("%s: Unsupported relocation type %d"
		  " in non-PLT relocations\n", obj->path,
		  ELF_R_TYPE(rel->r_info));
		return -1;
	    }
	}
    return 0;
}