Example #1
void
attribute_hidden
_dl_tlsdesc_lazy_resolver_fixup (struct tlsdesc volatile *td,
				 Elf32_Addr *got)
{
  struct link_map *l = (struct link_map *)got[1];
  lookup_t result;
  unsigned long value;

  if (_dl_tlsdesc_resolve_early_return_p
      (td, (void*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + l->l_addr)))
    return;

  if (td->argument.value & 0x80000000)
    {
      /* A global symbol, this is the symbol index.  */
      /* The code below was borrowed from _dl_fixup().  */
      const Elf_Symndx symndx = td->argument.value ^ 0x80000000;
      const ElfW(Sym) *const symtab
	= (const void *) D_PTR (l, l_info[DT_SYMTAB]);
      const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
      const ElfW(Sym) *sym = &symtab[symndx];

      /* Look up the target symbol.  If the normal lookup rules are not
	 used don't look in the global scope.  */
      if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL
	  && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0)
	{
	  const struct r_found_version *version = NULL;

	  if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
	    {
	      const ElfW(Half) *vernum =
		(const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]);
	      ElfW(Half) ndx = vernum[symndx] & 0x7fff;
	      version = &l->l_versions[ndx];
	      if (version->hash == 0)
		version = NULL;
	    }

	  result = _dl_lookup_symbol_x
	    (strtab + sym->st_name, l, &sym,
	     l->l_scope, version, ELF_RTYPE_CLASS_PLT,
	     DL_LOOKUP_ADD_DEPENDENCY, NULL);
	  if (sym)
	    value = sym->st_value;
	  else
	    {
	      td->entry = _dl_tlsdesc_undefweak;
	      goto done;
	    }
	}
      else
	{
	  /* We already found the symbol.  The module (and therefore its load
	     address) is also known.  */
	  result = l;
	  value = sym->st_value;
	}
    }
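
The td->argument.value & 0x80000000 test above relies on the top bit of the
32-bit descriptor argument acting as a tag: lazy descriptors for global
symbols carry the symbol-table index with the high bit set, so XORing (or
clearing) that bit recovers the index.  Below is a minimal, self-contained
sketch of that convention; encode_symndx/decode_symndx are invented for
illustration and are not glibc interfaces.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Top bit marks "this value is a symbol-table index"; the low 31 bits
   carry the index itself.  */
#define SYMNDX_TAG 0x80000000u

static uint32_t
encode_symndx (uint32_t symndx)
{
  assert ((symndx & SYMNDX_TAG) == 0);  /* Index must fit in 31 bits.  */
  return symndx | SYMNDX_TAG;
}

static uint32_t
decode_symndx (uint32_t value)
{
  assert ((value & SYMNDX_TAG) != 0);   /* Only valid for tagged values.  */
  return value ^ SYMNDX_TAG;            /* Same as clearing the tag bit.  */
}

int
main (void)
{
  uint32_t arg = encode_symndx (42);
  printf ("tagged 0x%08x decodes to index %u\n",
          (unsigned) arg, (unsigned) decode_symndx (arg));
  return 0;
}
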
Example #2
void
attribute_hidden
_dl_tlsdesc_resolve_rela_fixup (struct tlsdesc *td, struct link_map *l)
{
  const ElfW(Rela) *reloc = atomic_load_relaxed (&td->arg);

  /* After GL(dl_load_lock) is grabbed only one caller can see td->entry in
     initial state in _dl_tlsdesc_resolve_early_return_p, other concurrent
     callers will return and retry calling td->entry.  The updated td->entry
     synchronizes with the single writer so all read accesses here can use
     relaxed order.  */
  if (_dl_tlsdesc_resolve_early_return_p
      (td, (void*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + l->l_addr)))
    return;

  /* The code below was borrowed from _dl_fixup(),
     except for checking for STB_LOCAL.  */
  const ElfW(Sym) *const symtab
    = (const void *) D_PTR (l, l_info[DT_SYMTAB]);
  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
  const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
  lookup_t result;

   /* Look up the target symbol.  If the normal lookup rules are not
      used don't look in the global scope.  */
  if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL
      && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0)
    {
      const struct r_found_version *version = NULL;

      if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
	{
	  const ElfW(Half) *vernum =
	    (const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]);
	  ElfW(Half) ndx = vernum[ELFW(R_SYM) (reloc->r_info)] & 0x7fff;
	  version = &l->l_versions[ndx];
	  if (version->hash == 0)
	    version = NULL;
	}

      result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym,
				    l->l_scope, version, ELF_RTYPE_CLASS_PLT,
				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
    }
  else
    {
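
The comment at the top of this excerpt describes the concurrency scheme: a
single writer updates td->entry under GL(dl_load_lock), concurrent callers
that still observe the initial state simply return and retry through
td->entry, and that is what makes relaxed loads of td->arg safe.  The
following stand-alone sketch shows the shape of that double-checked pattern
using C11 atomics and an ordinary pthread mutex; the desc layout, the
entry_fn type and the function names are simplified assumptions, not the
real tlsdesc implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct desc;
typedef void *(*entry_fn) (struct desc *);

/* Simplified stand-in for a TLS descriptor: ENTRY starts out pointing at
   the lazy resolver and is later replaced by the final function; ARG is
   whatever the resolver fills in.  */
struct desc
{
  _Atomic entry_fn entry;
  void *arg;
};

static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;

static void *
final_entry (struct desc *d)
{
  return d->arg;
}

static void *
lazy_entry (struct desc *d)
{
  pthread_mutex_lock (&load_lock);
  /* Only the first caller to take the lock still sees the lazy resolver
     installed; later callers skip the work and merely retry the (by then
     final) entry point.  */
  if (atomic_load_explicit (&d->entry, memory_order_relaxed) == lazy_entry)
    {
      d->arg = "resolved";   /* Stands in for the expensive lookup.  */
      /* The release store pairs with callers that load the new entry
         outside the lock and then read ARG.  */
      atomic_store_explicit (&d->entry, final_entry, memory_order_release);
    }
  entry_fn now = atomic_load_explicit (&d->entry, memory_order_relaxed);
  pthread_mutex_unlock (&load_lock);
  return now (d);   /* Restart through the updated entry.  */
}

int
main (void)
{
  struct desc d = { lazy_entry, NULL };
  entry_fn f = atomic_load (&d.entry);
  printf ("%s\n", (const char *) f (&d));   /* Resolves on first call.  */
  f = atomic_load (&d.entry);
  printf ("%s\n", (const char *) f (&d));   /* Calls final_entry directly.  */
  return 0;
}
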
Example #3
void
internal_hidden_function
_dl_tlsdesc_resolve_rel_fixup (struct tlsdesc volatile *td,
			       struct link_map *l,
			       ptrdiff_t entry_check_offset)
{
  const ElfW(Rel) *reloc = td->arg;

  if (_dl_tlsdesc_resolve_early_return_p (td, __builtin_return_address (0)
					  - entry_check_offset))
    return;

  /* The code below was borrowed from _dl_fixup(),
     except for checking for STB_LOCAL.  */
  const ElfW(Sym) *const symtab
    = (const void *) D_PTR (l, l_info[DT_SYMTAB]);
  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
  const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
  lookup_t result;

   /* Look up the target symbol.  If the normal lookup rules are not
      used don't look in the global scope.  */
  if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL
      && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0)
    {
      const struct r_found_version *version = NULL;

      if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL)
	{
	  const ElfW(Half) *vernum =
	    (const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]);
	  ElfW(Half) ndx = vernum[ELFW(R_SYM) (reloc->r_info)] & 0x7fff;
	  version = &l->l_versions[ndx];
	  if (version->hash == 0)
	    version = NULL;
	}

      result = _dl_lookup_symbol_x (strtab + sym->st_name, l, &sym,
				    l->l_scope, version, ELF_RTYPE_CLASS_PLT,
				    DL_LOOKUP_ADD_DEPENDENCY, NULL);
    }
  else
    {
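
All three tlsdesc fixups shown so far do the same DT_VERSYM dance: mask off
the high "hidden" bit of the versym entry, use the low 15 bits to index the
module's version table, and treat a zero hash as "no particular version
required" (NULL).  Here is a tiny self-contained sketch of that masking; the
struct and the table contents are stand-ins, not the real link_map data, and
the nonzero hash below is just a placeholder value.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for glibc's struct r_found_version, reduced to the fields the
   lookup above touches.  */
struct found_version
{
  const char *name;
  uint32_t hash;   /* 0 means "no particular version required".  */
};

/* Hypothetical per-module version table; indices 0 and 1 play the role of
   VER_NDX_LOCAL and VER_NDX_GLOBAL and carry no version.  */
static const struct found_version versions[] =
{
  { "*local*",  0 },
  { "*global*", 0 },
  { "GLIBC_2.17", 0x0d696917 },   /* Placeholder hash, not computed here.  */
};

static const struct found_version *
version_for_symbol (uint16_t versym_entry)
{
  /* Bit 15 is the "hidden" flag; only the low 15 bits index the table.  */
  uint16_t ndx = versym_entry & 0x7fff;
  const struct found_version *v = &versions[ndx];
  return v->hash == 0 ? NULL : v;
}

int
main (void)
{
  const struct found_version *v = version_for_symbol (0x8002);
  printf ("%s\n", v != NULL ? v->name : "unversioned");
  return 0;
}
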
Example #4
void
_dl_reloc_overflow (struct link_map *map,
		   const char *name,
		   Elf64_Addr *const reloc_addr,
		   const Elf64_Sym *refsym)
{
  char buffer[128];
  char *t;
  t = stpcpy (buffer, name);
  t = stpcpy (t, " reloc at 0x");
  _itoa_word ((unsigned long) reloc_addr, t, 16, 0);
  if (refsym)
    {
      const char *strtab;

      strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
      t = stpcpy (t, " for symbol `");
      t = stpcpy (t, strtab + refsym->st_name);
      t = stpcpy (t, "'");
    }
  t = stpcpy (t, " out of range");
  _dl_signal_error (0, map->l_name, NULL, buffer);
}
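
The address formatting in _dl_reloc_overflow relies on _itoa_word, a
glibc-internal helper that writes digits backwards, ending just before the
buffer position it is handed, so the usual idiom is to reserve a zero-filled
field first and let the digits land in its tail.  Below is a stand-alone
approximation of that idiom with a local helper; itoa_backwards is invented
for the sketch and is not the glibc function.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

/* Write VALUE in hex, least-significant digit first, ending just before
   BUFLIM.  Positions in front of the first digit are left untouched, which
   is why the caller pre-fills them with zeros.  */
static char *
itoa_backwards (unsigned long value, char *buflim)
{
  do
    *--buflim = "0123456789abcdef"[value % 16];
  while ((value /= 16) != 0);
  return buflim;
}

int
main (void)
{
  char buffer[128];
  char *t = stpcpy (buffer, "R_X86_64_PC32");
  /* Reserve a 16-digit zero field; the hex digits overwrite its tail.  */
  t = stpcpy (t, " reloc at 0x0000000000000000");
  itoa_backwards (0xdeadbeefUL, t);
  t = stpcpy (t, " out of range");
  puts (buffer);   /* R_X86_64_PC32 reloc at 0x00000000deadbeef out of range  */
  return 0;
}
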
Example #5
/* This function is called through a special trampoline from the PLT the
   first time each PLT entry is called.  We must perform the relocation
   specified in the PLT of the given shared object, and return the resolved
   function address to the trampoline, which will restart the original call
   to that address.  Future calls will bounce directly from the PLT to the
   function.  */

DL_FIXUP_VALUE_TYPE
__attribute ((noinline)) ARCH_FIXUP_ATTRIBUTE
_dl_fixup (
# ifdef ELF_MACHINE_RUNTIME_FIXUP_ARGS
	   ELF_MACHINE_RUNTIME_FIXUP_ARGS,
# endif
	   struct link_map *l, ElfW(Word) reloc_arg)
{
  const ElfW(Sym) *const symtab
    = (const void *) D_PTR (l, l_info[DT_SYMTAB]);
  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);

  const PLTREL *const reloc
    = (const void *) (D_PTR (l, l_info[DT_JMPREL]) + reloc_offset);
  const ElfW(Sym) *sym = &symtab[ELFW(R_SYM) (reloc->r_info)];
  void *const rel_addr = (void *)(l->l_addr + reloc->r_offset);
  lookup_t result;
  DL_FIXUP_VALUE_TYPE value;

  /* Sanity check that we're really looking at a PLT relocation.  */
  assert (ELFW(R_TYPE)(reloc->r_info) == ELF_MACHINE_JMP_SLOT);

   /* Look up the target symbol.  If the normal lookup rules are not
      used don't look in the global scope.  */
  if (__builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0)
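
The comment at the top of _dl_fixup spells out the lazy-binding contract:
resolve on the first call, hand the resolved address back to the trampoline
so the original call can complete, and make sure later calls bypass the
resolver entirely.  Here is a toy model of that contract with an ordinary
function pointer standing in for the GOT slot; there is no real PLT or
trampoline involved, and all names are invented for illustration.

#include <stdio.h>

static int
the_real_function (int x)
{
  return x * 2;
}

/* One "GOT slot": it initially points at the resolver, which patches the
   slot so that subsequent calls go straight to the target.  */
static int resolver (int x);
static int (*got_slot) (int) = resolver;

static int
resolver (int x)
{
  puts ("resolving on first call");
  got_slot = the_real_function;   /* Like _dl_fixup writing the result back.  */
  return the_real_function (x);   /* Complete the original call.  */
}

int
main (void)
{
  printf ("%d\n", got_slot (1));  /* Goes through the resolver.  */
  printf ("%d\n", got_slot (2));  /* Calls the target directly.  */
  return 0;
}
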
Example #6
     a dependency of the current object.  */
  for (n = 0; n < map->l_searchlist.r_nlist; n++)
    if (_dl_name_match_p (name, map->l_searchlist.r_list[n]))
      return map->l_searchlist.r_list[n];

  /* Should never happen.  */
  return NULL;
}
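
The loop above matches the requested name against each entry of the
dependency search list with _dl_name_match_p, which roughly compares the
name against every name a map is known under (its load name plus the
recorded sonames).  A toy version of that comparison over a linked name
chain follows; the types and the name_match_p helper are invented for the
sketch.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy equivalents of glibc's libname_list / link_map for this sketch.  */
struct libname { const char *name; struct libname *next; };
struct map { struct libname *l_libname; };

/* Return true if NAME matches any name MAP is known under, roughly what
   _dl_name_match_p does.  */
static bool
name_match_p (const char *name, const struct map *map)
{
  for (const struct libname *ln = map->l_libname; ln != NULL; ln = ln->next)
    if (strcmp (name, ln->name) == 0)
      return true;
  return false;
}

int
main (void)
{
  struct libname soname = { "libc.so.6", NULL };
  struct libname path = { "/lib64/libc.so.6", &soname };
  struct map libc = { &path };

  printf ("%d\n", name_match_p ("libc.so.6", &libc));   /* 1 */
  printf ("%d\n", name_match_p ("libm.so.6", &libc));   /* 0 */
  return 0;
}
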


static int
internal_function
match_symbol (const char *name, ElfW(Word) hash, const char *string,
	      struct link_map *map, int verbose, int weak)
{
  const char *strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
  ElfW(Addr) def_offset;
  ElfW(Verdef) *def;
  /* Initialize to make the compiler happy.  */
  const char *errstring = NULL;
  int result = 0;

  /* Display information about what we are doing while debugging.  */
  if (__builtin_expect (_dl_debug_mask & DL_DEBUG_VERSIONS, 0))
    _dl_debug_printf ("\
checking for version `%s' in file %s required by file %s\n",
		      string, map->l_name[0] ? map->l_name : _dl_argv[0],
		      name);

  if (__builtin_expect (map->l_info[VERSYMIDX (DT_VERDEF)] == NULL, 0))
    {
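
match_symbol receives a precomputed ElfW(Word) hash of the version string;
past the point where this excerpt stops it walks the ElfW(Verdef) entries,
comparing that hash against vd_hash before confirming the match with strcmp.
The hash in question is the classic System V ELF hash; a reference
implementation is below (glibc's _dl_elf_hash is an optimized equivalent of
the same function).

#include <stdio.h>

/* The System V ELF hash, as published in the gABI.  */
static unsigned long
sysv_elf_hash (const unsigned char *name)
{
  unsigned long h = 0, g;

  while (*name != '\0')
    {
      h = (h << 4) + *name++;
      g = h & 0xf0000000;
      if (g != 0)
        h ^= g >> 24;
      h &= ~g;
    }
  return h;
}

int
main (void)
{
  printf ("%#lx\n", sysv_elf_hash ((const unsigned char *) "GLIBC_2.17"));
  return 0;
}
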
Example #7
void
_dl_map_object_deps (struct link_map *map,
		     struct link_map **preloads, unsigned int npreloads,
		     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  struct dl_exception exception;

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (known, &nlist, map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (known, &nlist, preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  struct scratch_buffer needed_space;
  scratch_buffer_init (&needed_space);

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we would rather
     use alloca for the temporary list elements.  But using alloca means
     we cannot use recursive function calls.  */
  errno_saved = errno;
  errno_reason = 0;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
	 dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
	  && l != map && l->l_ldnum > 0)
	{
	  /* l->l_ldnum includes space for the terminating NULL.  */
	  if (!scratch_buffer_set_array_size
	      (&needed_space, l->l_ldnum, sizeof (struct link_map *)))
	    _dl_signal_error (ENOMEM, map->l_name, NULL,
			      N_("cannot allocate dependency buffer"));
	  needed = needed_space.data;
	}

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
	{
	  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
	  struct openaux_args args;
	  struct list *orig;
	  const ElfW(Dyn) *d;

	  args.strtab = strtab;
	  args.map = l;
	  args.trace_mode = trace_mode;
	  args.open_mode = open_mode;
	  orig = runp;

	  for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
	    if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
	      {
		/* Map in the needed object.  */
		struct link_map *dep;

		/* Recognize DSTs.  */
		name = expand_dst (l, strtab + d->d_un.d_val, 0);
		/* Store the tag in the argument structure.  */
		args.name = name;

		int err = _dl_catch_exception (&exception, openaux, &args);
		if (__glibc_unlikely (exception.errstring != NULL))
		  {
		    if (err)
		      errno_reason = err;
		    else
		      errno_reason = -1;
		    goto out;
		  }
		else
		  dep = args.aux;

		if (! dep->l_reserved)
		  {
		    /* Allocate new entry.  */
		    struct list *newp;

		    newp = alloca (sizeof (struct list));

		    /* Append DEP to the list.  */
		    newp->map = dep;
		    newp->done = 0;
		    newp->next = NULL;
		    tail->next = newp;
		    tail = newp;
		    ++nlist;
		    /* Set the mark bit that says it's already in the list.  */
		    dep->l_reserved = 1;
		  }

		/* Remember this dependency.  */
		if (needed != NULL)
		  needed[nneeded++] = dep;
	      }
	    else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
	      {
		struct list *newp;

		/* Recognize DSTs.  */
		name = expand_dst (l, strtab + d->d_un.d_val,
				   d->d_tag == DT_AUXILIARY);
		/* Store the tag in the argument structure.  */
		args.name = name;

		/* Say that we are about to load an auxiliary library.  */
		if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
				      0))
		  _dl_debug_printf ("load auxiliary object=%s"
				    " requested by file=%s\n",
				    name,
				    DSO_FILENAME (l->l_name));

		/* We must be prepared that the addressed shared
		   object is not available.  For filter objects the dependency
		   must be available.  */
		int err = _dl_catch_exception (&exception, openaux, &args);
		if (__glibc_unlikely (exception.errstring != NULL))
		  {
		    if (d->d_tag == DT_AUXILIARY)
		      {
			/* We are not interested in the error message.  */
			_dl_exception_free (&exception);
			/* Simply ignore this error and continue the work.  */
			continue;
		      }
		    else
		      {
			if (err)
			  errno_reason = err;
			else
			  errno_reason = -1;
			goto out;
		      }
		  }

		/* The auxiliary object is actually available.
		   Incorporate the map in all the lists.  */

		/* Allocate new entry.  This always has to be done.  */
		newp = alloca (sizeof (struct list));

		/* We want to insert the new map before the current one,
		   but we have no back links.  So we copy the contents of
		   the current entry over.  Note that ORIG and NEWP now
		   have switched their meanings.  */
		memcpy (newp, orig, sizeof (*newp));

		/* Initialize new entry.  */
		orig->done = 0;
		orig->map = args.aux;

		/* Remember this dependency.  */
		if (needed != NULL)
		  needed[nneeded++] = args.aux;

		/* We must handle two situations here: the map is new,
		   so we must add it in all three lists.  If the map
		   is already known, we have two further possibilities:
		   - if the object is before the current map in the
		   search list, we do nothing.  It is already found
		   early
		   - if the object is after the current one, we must
		   move it just before the current map to make sure
		   the symbols are found early enough
		*/
		if (args.aux->l_reserved)
		  {
		    /* The object is already somewhere in the list.
		       Locate it first.  */
		    struct list *late;

		    /* This object is already in the search list we
		       are building.  Don't add a duplicate pointer.
		       Just added by _dl_map_object.  */
		    for (late = newp; late->next != NULL; late = late->next)
		      if (late->next->map == args.aux)
			break;

		    if (late->next != NULL)
		      {
			/* The object is somewhere behind the current
			   position in the search path.  We have to
			   move it to this earlier position.  */
			orig->next = newp;

			/* Now remove the later entry from the list
			   and adjust the tail pointer.  */
			if (tail == late->next)
			  tail = late;
			late->next = late->next->next;

			/* We must move the object earlier in the chain.  */
			if (args.aux->l_prev != NULL)
			  args.aux->l_prev->l_next = args.aux->l_next;
			if (args.aux->l_next != NULL)
			  args.aux->l_next->l_prev = args.aux->l_prev;

			args.aux->l_prev = newp->map->l_prev;
			newp->map->l_prev = args.aux;
			if (args.aux->l_prev != NULL)
			  args.aux->l_prev->l_next = args.aux;
			args.aux->l_next = newp->map;
		      }
		    else
		      {
			/* The object must be somewhere earlier in the
			   list.  Undo to the current list element what
			   we did above.  */
			memcpy (orig, newp, sizeof (*newp));
			continue;
		      }
		  }
		else
		  {
		    /* This is easy.  We just add the symbol right here.  */
		    orig->next = newp;
		    ++nlist;
		    /* Set the mark bit that says it's already in the list.  */
		    args.aux->l_reserved = 1;

		    /* The only problem is that in the doubly-linked
		       list of all objects we don't have this new
		       object at the correct place.  Correct this here.  */
		    if (args.aux->l_prev)
		      args.aux->l_prev->l_next = args.aux->l_next;
		    if (args.aux->l_next)
		      args.aux->l_next->l_prev = args.aux->l_prev;

		    args.aux->l_prev = newp->map->l_prev;
		    newp->map->l_prev = args.aux;
		    if (args.aux->l_prev != NULL)
		      args.aux->l_prev->l_next = args.aux;
		    args.aux->l_next = newp->map;
		  }

		/* Move the tail pointer if necessary.  */
		if (orig == tail)
		  tail = newp;

		/* Move on the insert point.  */
		orig = newp;
	      }
	}

      /* Terminate the list of dependencies and store the array address.  */
      if (needed != NULL)
	{
	  needed[nneeded++] = NULL;

	  struct link_map **l_initfini = (struct link_map **)
	    malloc ((2 * nneeded + 1) * sizeof needed[0]);
	  if (l_initfini == NULL)
	    {
	      scratch_buffer_free (&needed_space);
	      _dl_signal_error (ENOMEM, map->l_name, NULL,
				N_("cannot allocate dependency list"));
	    }
	  l_initfini[0] = l;
	  memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
	  memcpy (&l_initfini[nneeded + 1], l_initfini,
		  nneeded * sizeof needed[0]);
	  atomic_write_barrier ();
	  l->l_initfini = l_initfini;
	  l->l_free_initfini = 1;
	}

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
	do
	  runp = runp->next;
	while (runp != NULL && runp->done);
    }

 out:
  scratch_buffer_free (&needed_space);

  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
	 a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
				 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
		      N_("cannot allocate symbol search list"));


  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
	/* This can happen when we trace the loading.  */
	--map->l_searchlist.r_nlist;
      else
	map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
	 to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
	 for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
	{
	  struct link_map *l = map->l_searchlist.r_list[i];
	  unsigned int j, cnt;

	  /* The local scope has been already computed.  */
	  if (l == map
	      || (l->l_local_scope[0]
		  && l->l_local_scope[0]->r_nlist) != 0)
	    continue;

	  if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
	    {
	      /* As current DT_AUXILIARY/DT_FILTER implementation needs to be
		 rewritten, no need to bother with prelinking the old
		 implementation.  */
	      _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
	    }

	  cnt = _dl_build_local_scope (l_initfini, l);
	  assert (cnt <= nlist);
	  for (j = 0; j < cnt; j++)
	    {
	      l_initfini[j]->l_reserved = 0;
	      if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
					 != NULL, 0))
		l->l_symbolic_in_local_scope = true;
	    }

	  l->l_local_scope[0] =
	    (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
					    + (cnt
					       * sizeof (struct link_map *)));
	  if (l->l_local_scope[0] == NULL)
	    _dl_signal_error (ENOMEM, map->l_name, NULL,
			      N_("cannot allocate symbol search list"));
	  l->l_local_scope[0]->r_nlist = cnt;
	  l->l_local_scope[0]->r_list =
	    (struct link_map **) (l->l_local_scope[0] + 1);
	  memcpy (l->l_local_scope[0]->r_list, l_initfini,
		  cnt * sizeof (struct link_map *));
	}
    }
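
The search-list allocation near the end of this function uses one malloc of
2 * nlist + 1 pointers for two purposes: slots 0 .. nlist are later filled
with the init/fini ordering plus a terminating NULL, while slots
nlist + 1 .. 2 * nlist, reachable through map->l_searchlist.r_list, hold the
breadth-first symbol search list built above.  The sketch below is a toy
illustration of that shared layout; struct map and the data are invented
stand-ins, and the later dependency sorting of the init/fini half is not
shown.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct map { const char *name; };   /* Toy stand-in for struct link_map.  */

int
main (void)
{
  struct map a = { "app" }, b = { "libfoo.so" }, c = { "libc.so.6" };
  struct map *deps[] = { &a, &b, &c };
  unsigned int nlist = 3;

  /* One allocation, two views: [0 .. nlist] for the init/fini order plus
     its NULL terminator, [nlist + 1 .. 2 * nlist] for the search list.  */
  struct map **l_initfini = malloc ((2 * nlist + 1) * sizeof (struct map *));
  if (l_initfini == NULL)
    return 1;

  struct map **r_list = &l_initfini[nlist + 1];
  memcpy (r_list, deps, nlist * sizeof (struct map *));

  /* The init/fini half starts out as a copy of the search order and would
     be reordered by dependency sorting afterwards.  */
  memcpy (l_initfini, r_list, nlist * sizeof (struct map *));
  l_initfini[nlist] = NULL;

  for (unsigned int i = 0; i < nlist; ++i)
    printf ("search[%u] = %s, initfini[%u] = %s\n",
            i, r_list[i]->name, i, l_initfini[i]->name);

  free (l_initfini);
  return 0;
}
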