Code example #1
File: dl-tls.c  Project: htakata/uClibc
/* Initialize static TLS area and DTV for current (only) thread.
   libpthread implementations should provide their own hook
   to handle all threads.  */
void
attribute_hidden __attribute_noinline__
_dl_nothread_init_static_tls (struct link_map *map)
{
# ifdef TLS_TCB_AT_TP
	void *dest = (char *) THREAD_SELF - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
	void *dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	/* Fill in the DTV slot so that a later LD/GD access will find it.  */
	dtv_t *dtv = THREAD_DTV ();
	if (!(map->l_tls_modid <= dtv[-1].counter)) {
		_dl_dprintf(2, "map->l_tls_modid <= dtv[-1].counter FAILED!\n");
		_dl_exit(30);
	}
	dtv[map->l_tls_modid].pointer.val = dest;
	dtv[map->l_tls_modid].pointer.is_static = true;

	/* Initialize the memory.  */
	_dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
	_dl_memset((dest + map->l_tls_initimage_size), '\0',
		map->l_tls_blocksize - map->l_tls_initimage_size);
}
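
Note on the two branches above: they reflect the two standard ELF TLS layouts, and the sketch below only restates what the code already computes (the "variant 1/2" names and the example architectures come from the usual ELF TLS terminology, not from the uClibc sources).

/*
 * Illustrative summary of the layouts handled by _dl_nothread_init_static_tls:
 *
 *   TLS_TCB_AT_TP  ("variant 2", e.g. x86):
 *       static TLS lives below the thread pointer, so
 *       dest = (char *) THREAD_SELF - map->l_tls_offset;
 *
 *   TLS_DTV_AT_TP  ("variant 1", e.g. ARM, PowerPC):
 *       static TLS lives above the thread pointer, so
 *       dest = (char *) THREAD_SELF + map->l_tls_offset + TLS_PRE_TCB_SIZE;
 */
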
Code example #2
File: dl-hash.c  Project: jameshilliard/WECB-BH-GPL
/*
 * We call this function when we have just read an ELF library or executable.
 * We add the relevant info to the symbol chain, so that we can resolve all
 * externals properly.
 */
struct elf_resolve *_dl_add_elf_hash_table(const char *libname,
	DL_LOADADDR_TYPE loadaddr, unsigned long *dynamic_info, unsigned long dynamic_addr,
	attribute_unused unsigned long dynamic_size)
{
	Elf_Symndx *hash_addr;
	struct elf_resolve *tpnt;
	int i;

	if (!_dl_loaded_modules) {
		tpnt = _dl_loaded_modules = (struct elf_resolve *) _dl_malloc(sizeof(struct elf_resolve));
		_dl_memset(tpnt, 0, sizeof(struct elf_resolve));
	} else {
		tpnt = _dl_loaded_modules;
		while (tpnt->next)
			tpnt = tpnt->next;
		tpnt->next = (struct elf_resolve *) _dl_malloc(sizeof(struct elf_resolve));
		_dl_memset(tpnt->next, 0, sizeof(struct elf_resolve));
		tpnt->next->prev = tpnt;
		tpnt = tpnt->next;
	}

	tpnt->next = NULL;
	tpnt->init_flag = 0;
	tpnt->libname = _dl_strdup(libname);
	tpnt->dynamic_addr = (ElfW(Dyn) *)dynamic_addr;
	tpnt->libtype = loaded_file;

#ifdef __LDSO_GNU_HASH_SUPPORT__
	if (dynamic_info[DT_GNU_HASH_IDX] != 0) {
		Elf32_Word *hash32 = (Elf_Symndx*)dynamic_info[DT_GNU_HASH_IDX];

		tpnt->nbucket = *hash32++;
		Elf32_Word symbias = *hash32++;
		Elf32_Word bitmask_nwords = *hash32++;
		/* Must be a power of two.  */
		_dl_assert ((bitmask_nwords & (bitmask_nwords - 1)) == 0);
		tpnt->l_gnu_bitmask_idxbits = bitmask_nwords - 1;
		tpnt->l_gnu_shift = *hash32++;

		tpnt->l_gnu_bitmask = (ElfW(Addr) *) hash32;
		hash32 += __ELF_NATIVE_CLASS / 32 * bitmask_nwords;

		tpnt->l_gnu_buckets = hash32;
		hash32 += tpnt->nbucket;
		tpnt->l_gnu_chain_zero = hash32 - symbias;
	} else
Code example #3
File: dl-tls.c  Project: htakata/uClibc
void *
internal_function
_dl_allocate_tls_storage (void)
{
  void *result;
  size_t size = _dl_tls_static_size;

# if defined(TLS_DTV_AT_TP)
  /* Memory layout is:
     [ TLS_PRE_TCB_SIZE ] [ TLS_TCB_SIZE ] [ TLS blocks ]
			  ^ This should be returned.  */
  size += (TLS_PRE_TCB_SIZE + _dl_tls_static_align - 1)
	  & ~(_dl_tls_static_align - 1);
# endif

  /* Allocate a correctly aligned chunk of memory.  */
  result = _dl_memalign (_dl_tls_static_align, size);
  if (__builtin_expect (result != NULL, 1))
    {
      /* Allocate the DTV.  */
      void *allocated = result;

# ifdef TLS_TCB_AT_TP
      /* The TCB follows the TLS blocks.  */
      result = (char *) result + size - TLS_TCB_SIZE;

      /* Clear the TCB data structure.  We can't ask the caller (i.e.
	 libpthread) to do it, because we will initialize the DTV et al.  */
      _dl_memset (result, '\0', TLS_TCB_SIZE);
# elif defined(TLS_DTV_AT_TP)
      result = (char *) result + size - _dl_tls_static_size;

      /* Clear the TCB data structure and TLS_PRE_TCB_SIZE bytes before it.
	 We can't ask the caller (i.e. libpthread) to do it, because we will
	 initialize the DTV et al.  */
      _dl_memset ((char *) result - TLS_PRE_TCB_SIZE, '\0',
	      TLS_PRE_TCB_SIZE + TLS_TCB_SIZE);
# endif

      result = allocate_dtv (result);
      if (result == NULL)
	_dl_free (allocated);
    }

  return result;
}
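
As a companion to the layout comment inside the function, the arithmetic on `result` places the returned pointer as follows; this is an illustrative reading of the code above, not a quote from the uClibc headers.

/*
 * Placement of the returned pointer (implied by the code above):
 *
 *   TLS_TCB_AT_TP:   [ ... TLS blocks ... ][ TCB ]
 *                                          ^ result = allocated + size - TLS_TCB_SIZE
 *
 *   TLS_DTV_AT_TP:   [ TLS_PRE_TCB_SIZE ][ TCB ][ ... TLS blocks ... ]
 *                                        ^ result = allocated + size - _dl_tls_static_size
 */
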
Code example #4
File: dl-tls.c  Project: htakata/uClibc
void *
_dl_calloc (size_t __nmemb, size_t __size)
{
	void *result;
	size_t size = (__size * __nmemb);

	if (_dl_calloc_function)
		return (*_dl_calloc_function) (__nmemb, __size);

	if ((result = _dl_malloc(size)) != NULL) {
		_dl_memset(result, 0, size);
	}

	return result;
}
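
The product `__size * __nmemb` above is used unchecked, exactly as in the source; purely as an illustration, a calloc-style allocator can guard against the multiplication wrapping with a helper like the hypothetical `checked_mul` below (not part of uClibc).

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: compute nmemb * size, reporting overflow instead
   of silently wrapping. */
static int checked_mul(size_t nmemb, size_t size, size_t *out)
{
	/* If nmemb != 0 and size > SIZE_MAX / nmemb, the product would wrap. */
	if (nmemb != 0 && size > SIZE_MAX / nmemb)
		return 0;
	*out = nmemb * size;
	return 1;
}

A caller would then allocate only when the check succeeds, e.g. `if (checked_mul(__nmemb, __size, &size)) result = _dl_malloc(size);`.
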
Code example #5
File: dl-tls.c  Project: htakata/uClibc
static void *
allocate_and_init (struct link_map *map)
{
	void *newp;

	newp = _dl_memalign (map->l_tls_align, map->l_tls_blocksize);
	if (newp == NULL)
	{
		_dl_dprintf(2, "%s:%d: Out of memory!!!\n", __FUNCTION__, __LINE__);
		_dl_exit(1);
	}

	/* Initialize the memory.  */
	_dl_memcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size);
	_dl_memset ((newp + map->l_tls_initimage_size), '\0',
		map->l_tls_blocksize - map->l_tls_initimage_size);

	return newp;
}
Code example #6
File: dl-tls.c  Project: htakata/uClibc
void
_dl_add_to_slotinfo (struct link_map  *l)
{
  /* Now that we know the object is loaded successfully add
     modules containing TLS data to the dtv info table.  We
     might have to increase its size.  */
  struct dtv_slotinfo_list *listp;
  struct dtv_slotinfo_list *prevp;
  size_t idx = l->l_tls_modid;

  _dl_debug_early("Adding to slotinfo for %s\n", l->l_name);

  /* Find the place in the dtv slotinfo list.  */
  listp = _dl_tls_dtv_slotinfo_list;
  prevp = NULL;		/* Needed to shut up gcc.  */
  do
    {
      /* Does it fit in the array of this list element?  */
      if (idx < listp->len)
	break;
      idx -= listp->len;
      prevp = listp;
      listp = listp->next;
    }
  while (listp != NULL);

  if (listp == NULL)
    {
      /* When we come here it means we have to add a new element
	 to the slotinfo list.  And the new module must be in
	 the first slot.  */
      _dl_assert (idx == 0);

      listp = prevp->next = (struct dtv_slotinfo_list *)
	_dl_malloc (sizeof (struct dtv_slotinfo_list)
		+ TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
      if (listp == NULL)
	{
	  /* We ran out of memory.  We will simply fail this
	     call but don't undo anything we did so far.  The
	     application will crash or be terminated anyway very
	     soon.  */

	  /* We have to do this since some entries in the dtv
	     slotinfo array might already point to this
	     generation.  */
	  ++_dl_tls_generation;

	  _dl_dprintf (_dl_debug_file,
			"cannot create TLS data structures: ABORT\n");
	  _dl_exit (127);
	}

      listp->len = TLS_SLOTINFO_SURPLUS;
      listp->next = NULL;
      _dl_memset (listp->slotinfo, '\0',
	      TLS_SLOTINFO_SURPLUS * sizeof (struct dtv_slotinfo));
    }

  /* Add the information into the slotinfo data structure.  */
  listp->slotinfo[idx].map = l;
  listp->slotinfo[idx].gen = _dl_tls_generation + 1;
  /* ??? ideally this would be done once per call to dlopen.  However there's
     no easy way to indicate whether a library used TLS, so do it here
	 instead. */
  /* Bump the TLS generation number.  */
  _dl_tls_generation++;
}
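
For reference, the accesses above (listp->len, listp->next, listp->slotinfo[idx].map and .gen) imply roughly the following shape for the slotinfo list; this is an illustrative reconstruction, the real definitions live in the loader's internal TLS headers.

/* Illustrative reconstruction, not the actual header definition. */
struct dtv_slotinfo
{
  size_t gen;              /* Generation in which this module was added.  */
  struct link_map *map;    /* Module for this slot, or NULL if unused.  */
};

struct dtv_slotinfo_list
{
  size_t len;                        /* Number of entries in slotinfo[].  */
  struct dtv_slotinfo_list *next;    /* Next chunk of the list.  */
  struct dtv_slotinfo slotinfo[];    /* Entries for modids in this chunk.  */
};
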
Code example #7
File: dl-tls.c  Project: htakata/uClibc
struct link_map *
_dl_update_slotinfo (unsigned long int req_modid)
{
  struct link_map *the_map = NULL;
  dtv_t *dtv = THREAD_DTV ();

  /* The global dl_tls_dtv_slotinfo array contains for each module
     index the generation counter current when the entry was created.
     This array never shrinks so that all module indices which were
     valid at some time can be used to access it.  Before the first
     use of a new module index in this function the array was extended
     appropriately.  Access also does not have to be guarded against
     modifications of the array.  It is assumed that pointer-size
     values can be read atomically even in SMP environments.  It is
     possible that other threads at the same time dynamically load
     code and therefore add to the slotinfo list.  This is a problem
     since we must not pick up any information about incomplete work.
     The solution to this is to ignore all dtv slots which were
     created after the one we are currently interested in.  We know that
     dynamic loading for this module is completed and this is the last
     load operation we know finished.  */
  unsigned long int idx = req_modid;
  struct dtv_slotinfo_list *listp = _dl_tls_dtv_slotinfo_list;

  _dl_debug_early ("Updating slotinfo for module %d\n", req_modid);

  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }

  if (dtv[0].counter < listp->slotinfo[idx].gen)
    {
      /* The generation counter for the slot is higher than what the
	 current dtv implements.  We have to update the whole dtv but
	 only those entries with a generation counter <= the one for
	 the entry we need.  */
      size_t new_gen = listp->slotinfo[idx].gen;
      size_t total = 0;

      /* We have to look through the entire dtv slotinfo list.  */
      listp =  _dl_tls_dtv_slotinfo_list;
      do
	{
	  size_t cnt;

	  for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	    {
	      size_t gen = listp->slotinfo[cnt].gen;

	      if (gen > new_gen)
		/* This is a slot for a generation younger than the
		   one we are handling now.  It might be incompletely
		   set up so ignore it.  */
		continue;

	      /* If the entry is older than the current dtv layout we
		 know we don't have to handle it.  */
	      if (gen <= dtv[0].counter)
		continue;

	      /* If there is no map this means the entry is empty.  */
	      struct link_map *map = listp->slotinfo[cnt].map;
	      if (map == NULL)
		{
		  /* If this modid was used at some point the memory
		     might still be allocated.  */
		  if (! dtv[total + cnt].pointer.is_static
		      && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
		    {
		      _dl_free (dtv[total + cnt].pointer.val);
		      dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
		    }

		  continue;
		}

	      /* Check whether the current dtv array is large enough.  */
	      size_t modid = map->l_tls_modid;
	      _dl_assert (total + cnt == modid);
	      if (dtv[-1].counter < modid)
		{
		  /* Reallocate the dtv.  */
		  dtv_t *newp;
		  size_t newsize = _dl_tls_max_dtv_idx + DTV_SURPLUS;
		  size_t oldsize = dtv[-1].counter;

		  _dl_assert (map->l_tls_modid <= newsize);

		  if (dtv == _dl_initial_dtv)
		    {
		      /* This is the initial dtv that was allocated
			 during rtld startup using the dl-minimal.c
			 malloc instead of the real malloc.  We can't
			 free it, we have to abandon the old storage.  */

		      newp = _dl_malloc ((2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		      _dl_memcpy (newp, &dtv[-1], oldsize * sizeof (dtv_t));
		    }
		  else
		    {
		      newp = _dl_realloc (&dtv[-1],
				      (2 + newsize) * sizeof (dtv_t));
		      if (newp == NULL)
			oom ();
		    }

		  newp[0].counter = newsize;

		  /* Clear the newly allocated part.  */
		  _dl_memset (newp + 2 + oldsize, '\0',
			  (newsize - oldsize) * sizeof (dtv_t));

		  /* Point dtv to the generation counter.  */
		  dtv = &newp[1];

		  /* Install this new dtv in the thread data
		     structures.  */
		  INSTALL_NEW_DTV (dtv);
		}

	      /* If there is currently memory allocate for this
		 dtv entry free it.  */
	      /* XXX Ideally we will at some point create a memory
		 pool.  */
	      if (! dtv[modid].pointer.is_static
		  && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
		/* Note that free is called for NULL as well.  We
		   deallocate even if it is this dtv entry we are
		   supposed to load.  The reason is that we call
		   memalign and not malloc.  */
		_dl_free (dtv[modid].pointer.val);

	      /* This module is loaded dynamically.  We defer memory
		 allocation.  */
	      dtv[modid].pointer.is_static = false;
	      dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;

	      if (modid == req_modid)
		the_map = map;
	    }

	  total += listp->len;
	}
      while ((listp = listp->next) != NULL);

      /* This will be the new maximum generation counter.  */
      dtv[0].counter = new_gen;
    }

  return the_map;
}
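
The reallocation path above also shows the dtv indexing convention in use: two bookkeeping slots sit in front of the per-module entries and `dtv` points at the second of them. The sketch below is derived from this function (newp[0].counter = newsize; dtv = &newp[1]; dtv[0].counter = new_gen), not quoted from a header.

/*
 * dtv indexing as used above (illustrative):
 *
 *   allocation:  newp[0]           newp[1]          newp[2] ... newp[1+N]
 *   via dtv:     dtv[-1].counter   dtv[0].counter   dtv[1]  ... dtv[N]
 *                = capacity N      = generation     = per-module slots
 *
 * hence the "(2 + newsize) * sizeof (dtv_t)" allocation and the
 * "dtv[-1].counter < modid" capacity check.
 */
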
Code example #8
File: dl-tls.c  Project: htakata/uClibc
void *
internal_function
_dl_allocate_tls_init (void *result)
{
  if (result == NULL)
    /* The memory allocation failed.  */
    return NULL;

  dtv_t *dtv = GET_DTV (result);
  struct dtv_slotinfo_list *listp;
  size_t total = 0;
  size_t maxgen = 0;

  /* We have to prepare the dtv for all currently loaded modules using
     TLS.  For those which are dynamically loaded we add the values
     indicating deferred allocation.  */
  listp = _dl_tls_dtv_slotinfo_list;
  while (1)
    {
      size_t cnt;

      for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
	{
	  struct link_map *map;
	  void *dest;

	  /* Check for the total number of used slots.  */
	  if (total + cnt > _dl_tls_max_dtv_idx)
	    break;

	  map = listp->slotinfo[cnt].map;
	  if (map == NULL)
	    /* Unused entry.  */
	    continue;

	  /* Keep track of the maximum generation number.  This might
	     not be the generation counter.  */
	  maxgen = MAX (maxgen, listp->slotinfo[cnt].gen);

	  if (map->l_tls_offset == NO_TLS_OFFSET)
	    {
	      /* For dynamically loaded modules we simply store
		 the value indicating deferred allocation.  */
	      dtv[map->l_tls_modid].pointer.val = TLS_DTV_UNALLOCATED;
	      dtv[map->l_tls_modid].pointer.is_static = false;
	      continue;
	    }

	  _dl_assert (map->l_tls_modid == cnt);
	  _dl_assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# ifdef TLS_TCB_AT_TP
	  _dl_assert ((size_t) map->l_tls_offset >= map->l_tls_blocksize);
	  dest = (char *) result - map->l_tls_offset;
# elif defined(TLS_DTV_AT_TP)
	  dest = (char *) result + map->l_tls_offset;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

	  /* Copy the initialization image and clear the BSS part.  */
	  dtv[map->l_tls_modid].pointer.val = dest;
	  dtv[map->l_tls_modid].pointer.is_static = true;
	  _dl_memcpy(dest, map->l_tls_initimage, map->l_tls_initimage_size);
	  _dl_memset((dest + map->l_tls_initimage_size), '\0',
		  map->l_tls_blocksize - map->l_tls_initimage_size);

	}

      total += cnt;
      if (total >= _dl_tls_max_dtv_idx)
	break;

      listp = listp->next;
      _dl_assert (listp != NULL);
    }

  /* The DTV version is up-to-date now.  */
  dtv[0].counter = maxgen;

  return result;
}
Code example #9
File: boot.c  Project: darksoul42/bitrig
void
_dl_boot_bind(const long sp, long *dl_data, Elf_Dyn *dynamicp)
{
	struct elf_object  dynld;	/* Resolver data for the loader */
	AuxInfo		*auxstack;
	long		*stack;
	Elf_Dyn		*dynp;
	int		n, argc;
	char **argv, **envp;
	long loff;

	/*
	 * Scan argument and environment vectors. Find dynamic
	 * data vector put after them.
	 */
	stack = (long *)sp;
	argc = *stack++;
	argv = (char **)stack;
	envp = &argv[argc + 1];
	stack = (long *)envp;
	while (*stack++ != 0L)
		;

	/*
	 * Zero out dl_data.
	 */
	for (n = 0; n <= AUX_entry; n++)
		dl_data[n] = 0;

	/*
	 * Dig out auxiliary data set up by exec call. Move all known
	 * tags to an indexed local table for easy access.
	 */
	for (auxstack = (AuxInfo *)stack; auxstack->au_id != AUX_null;
	    auxstack++) {
		if (auxstack->au_id > AUX_entry)
			continue;
		dl_data[auxstack->au_id] = auxstack->au_v;
	}
	loff = dl_data[AUX_base];	/* XXX assumes ld.so is linked at 0x0 */

	/*
	 * We need to do 'selfreloc' in case the code wasn't
	 * loaded at the address it was linked to.
	 *
	 * Scan the DYNAMIC section for the loader.
	 * Cache the data for easier access.
	 */

	dynp = (Elf_Dyn *)((long)_DYNAMIC + loff);
	_dl_memset(dynld.Dyn.info, 0, sizeof(dynld.Dyn.info));
	while (dynp != NULL && dynp->d_tag != DT_NULL) {
		if (dynp->d_tag < DT_NUM)
			dynld.Dyn.info[dynp->d_tag] = dynp->d_un.d_val;
		else if (dynp->d_tag >= DT_LOPROC &&
		    dynp->d_tag < DT_LOPROC + DT_PROCNUM)
			dynld.Dyn.info[dynp->d_tag - DT_LOPROC + DT_NUM] =
			    dynp->d_un.d_val;
		if (dynp->d_tag == DT_TEXTREL)
			dynld.dyn.textrel = 1;
		dynp++;
	}

	/*
	 * Do the 'bootstrap relocation'. This is really only needed if
	 * the code was loaded at another location than it was linked to.
	 * We don't do undefined symbol resolving (too difficult...)
	 */

	/* "relocate" dyn.X values if they represent addresses */
	{
		int i, val;
		/* must be code, not pic data */
		int table[20];

		i = 0;
		table[i++] = DT_PLTGOT;
		table[i++] = DT_HASH;
		table[i++] = DT_STRTAB;
		table[i++] = DT_SYMTAB;
		table[i++] = DT_RELA;
		table[i++] = DT_INIT;
		table[i++] = DT_FINI;
		table[i++] = DT_REL;
		table[i++] = DT_JMPREL;
		/* other processors insert their extras here */
		table[i++] = DT_NULL;
		for (i = 0; table[i] != DT_NULL; i++) {
			val = table[i];
			if (val >= DT_LOPROC && val < DT_LOPROC + DT_PROCNUM)
				val = val - DT_LOPROC + DT_NUM;
			else if (val >= DT_NUM)
				continue;
			if (dynld.Dyn.info[val] != 0)
				dynld.Dyn.info[val] += loff;
		}
	}

	{
		u_int32_t rs;
		Elf_Rel *rp;
		int	i;

		rp = (Elf_Rel *)(dynld.Dyn.info[DT_REL]);
		rs = dynld.dyn.relsz;

		for (i = 0; i < rs; i += sizeof (Elf_Rel)) {
			Elf_Addr *ra;
			const Elf_Sym *sp;

			sp = dynld.dyn.symtab;
			sp += ELF_R_SYM(rp->r_info);

			if (ELF_R_SYM(rp->r_info) && sp->st_value == 0) {
#if 0
/* cannot printf in this function */
				_dl_wrstderr("Dynamic loader failure: self bootstrapping impossible.\n");
				_dl_wrstderr("Undefined symbol: ");
				_dl_wrstderr((char *)dynld.dyn.strtab +
				    sp->st_name);
#endif
				_dl_exit(5);
			}

			ra = (Elf_Addr *)(rp->r_offset + loff);
			RELOC_REL(rp, sp, ra, loff);
			rp++;
		}
	}

	for (n = 0; n < 2; n++) {
		unsigned long rs;
		Elf_RelA *rp;
		int	i;

		switch (n) {
		case 0:
			rp = (Elf_RelA *)(dynld.Dyn.info[DT_JMPREL]);
			rs = dynld.dyn.pltrelsz;
			break;
		case 1:
			rp = (Elf_RelA *)(dynld.Dyn.info[DT_RELA]);
			rs = dynld.dyn.relasz;
			break;
		default:
			rp = NULL;
			rs = 0;
		}
		for (i = 0; i < rs; i += sizeof (Elf_RelA)) {
			Elf_Addr *ra;
			const Elf_Sym *sp;

			sp = dynld.dyn.symtab;
			sp += ELF_R_SYM(rp->r_info);
			if (ELF_R_SYM(rp->r_info) && sp->st_value == 0) {
#if 0
				_dl_wrstderr("Dynamic loader failure: self bootstrapping impossible.\n");
				_dl_wrstderr("Undefined symbol: ");
				_dl_wrstderr((char *)dynld.dyn.strtab +
				    sp->st_name);
#endif
				_dl_exit(6);
			}

			ra = (Elf_Addr *)(rp->r_offset + loff);
			RELOC_RELA(rp, sp, ra, loff, dynld.dyn.pltgot);
			rp++;
		}
	}

	RELOC_GOT(&dynld, loff);

	/*
	 * we have been fully relocated here, so most things no longer
	 * need the loff adjustment
	 */
}
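
The pointer walk at the top of _dl_boot_bind assumes the standard ELF process-startup stack; the sketch below only spells out the layout that the argc/argv/envp/auxv scanning above relies on.

/*
 * Initial stack layout walked by _dl_boot_bind (illustrative):
 *
 *   sp ->  argc
 *          argv[0] ... argv[argc-1], NULL
 *          envp[0] ...,              NULL
 *          AuxInfo entries (au_id / au_v pairs), terminated by AUX_null
 *
 * argv starts right after argc, envp right after argv's NULL terminator,
 * and the auxiliary vector right after envp's NULL terminator.
 */
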
Code example #10
File: library.c  Project: enukane/openbsd-work
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags)
{
	int	libfile, i;
	struct load_list *next_load, *load_list = NULL;
	Elf_Addr maxva = 0, minva = ELFDEFNNAME(NO_ADDR);
	Elf_Addr libaddr, loff, align = _dl_pagesz - 1;
	elf_object_t *object;
	char	hbuf[4096];
	Elf_Dyn *dynp = 0;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	struct stat sb;
	void *prebind_data;

#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	libfile = _dl_open(libname, O_RDONLY);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if ( _dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			object->obj_flags |= flags & DF_1_GLOBAL;
			_dl_close(libfile);
			if (_dl_loading_object == NULL)
				_dl_loading_object = object;
			if (object->load_object != _dl_objects &&
			    object->load_object != _dl_loading_object) {
				_dl_link_grpref(object->load_object,
				    _dl_loading_object);
			}
			return(object);
		}
	}

	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0  || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/*
	 *  Alright, we might have a winner!
	 *  Figure out how much VM space we need.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			if (phdp->p_vaddr < minva)
				minva = phdp->p_vaddr;
			if (phdp->p_vaddr + phdp->p_memsz > maxva)
				maxva = phdp->p_vaddr + phdp->p_memsz;
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		case PT_TLS:
			_dl_printf("%s: unsupported TLS program header in %s\n",
			    _dl_progname, libname);
			_dl_close(libfile);
			_dl_errno = DL_CANT_LOAD_OBJ;
			return(0);
		default:
			break;
		}
	}
	minva = TRUNC_PG(minva);
	maxva = ROUND_PG(maxva);

	/*
	 * We map the entire area to see that we can get the VM
	 * space required. Map it inaccessible to start with.
	 *
	 * We must map the file we'll map later otherwise the VM
	 * system won't be able to align the mapping properly
	 * on VAC architectures.
	 */
	libaddr = (Elf_Addr)_dl_mmap(0, maxva - minva, PROT_NONE,
	    MAP_PRIVATE|MAP_FILE, libfile, 0);
	if (_dl_mmap_error(libaddr)) {
		_dl_printf("%s: rtld mmap failed mapping %s.\n",
		    _dl_progname, libname);
		_dl_close(libfile);
		_dl_errno = DL_CANT_MMAP;
		return(0);
	}

	loff = libaddr - minva;
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);

	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD: {
			char *start = (char *)(TRUNC_PG(phdp->p_vaddr)) + loff;
			Elf_Addr off = (phdp->p_vaddr & align);
			Elf_Addr size = off + phdp->p_filesz;
			void *res;

			if (size != 0) {
				res = _dl_mmap(start, ROUND_PG(size),
				    PFLAGS(phdp->p_flags),
				    MAP_FIXED|MAP_PRIVATE, libfile,
				    TRUNC_PG(phdp->p_offset));
			} else
				res = NULL;	/* silence gcc */
			next_load = _dl_malloc(sizeof(struct load_list));
			next_load->next = load_list;
			load_list = next_load;
			next_load->start = start;
			next_load->size = size;
			next_load->prot = PFLAGS(phdp->p_flags);
			if (size != 0 && _dl_mmap_error(res)) {
				_dl_printf("%s: rtld mmap failed mapping %s.\n",
				    _dl_progname, libname);
				_dl_close(libfile);
				_dl_errno = DL_CANT_MMAP;
				_dl_munmap((void *)libaddr, maxva - minva);
				_dl_load_list_free(load_list);
				return(0);
			}
			if (phdp->p_flags & PF_W) {
				/* Zero out everything past the EOF */
				if ((size & align) != 0)
					_dl_memset(start + size, 0,
					    _dl_pagesz - (size & align));
				if (ROUND_PG(size) ==
				    ROUND_PG(off + phdp->p_memsz))
					continue;
				start = start + ROUND_PG(size);
				size = ROUND_PG(off + phdp->p_memsz) -
				    ROUND_PG(size);
				res = _dl_mmap(start, size,
				    PFLAGS(phdp->p_flags),
				    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
				if (_dl_mmap_error(res)) {
					_dl_printf("%s: rtld mmap failed mapping %s.\n",
					    _dl_progname, libname);
					_dl_close(libfile);
					_dl_errno = DL_CANT_MMAP;
					_dl_munmap((void *)libaddr, maxva - minva);
					_dl_load_list_free(load_list);
					return(0);
				}
			}
			break;
		}

		case PT_OPENBSD_RANDOMIZE:
			_dl_randombuf((char *)(phdp->p_vaddr + loff),
			    phdp->p_memsz);
			break;

		default:
			break;
		}
	}

	prebind_data = prebind_load_fd(libfile, libname);

	_dl_close(libfile);

	dynp = (Elf_Dyn *)((unsigned long)dynp + loff);
	object = _dl_finalize_object(libname, dynp,
	    (Elf_Phdr *)((char *)libaddr + ehdr->e_phoff), ehdr->e_phnum,type,
	    libaddr, loff);
	if (object) {
		object->prebind_data = prebind_data;
		object->load_size = maxva - minva;	/*XXX*/
		object->load_list = load_list;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		_dl_set_sod(object->load_name, &object->sod);
	} else {
		_dl_munmap((void *)libaddr, maxva - minva);
		_dl_load_list_free(load_list);
	}
	return(object);
}
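
The loader above (and the variant in the next example) leans on the ROUND_PG/TRUNC_PG macros defined near the top of the function; a quick worked example, assuming the usual 4 KiB page size (so align == 0xfff; _dl_pagesz is set elsewhere at runtime):

/* With _dl_pagesz == 4096 (assumed), align == 0xfff:
 *   TRUNC_PG(0x12345) == 0x12000   -- round down to a page boundary
 *   ROUND_PG(0x12345) == 0x13000   -- round up to the next page boundary
 *   ROUND_PG(0x12000) == 0x12000   -- already aligned, unchanged
 */
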
Code example #11
elf_object_t *
_dl_tryload_shlib(const char *libname, int type, int flags)
{
	int libfile, i;
	struct load_list *ld, *lowld = NULL;
	elf_object_t *object;
	Elf_Dyn *dynp = 0;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdp;
	Elf_Addr load_end = 0;
	Elf_Addr align = _dl_pagesz - 1, off, size;
	struct stat sb;
	void *prebind_data;
	char hbuf[4096];

#define ROUND_PG(x) (((x) + align) & ~(align))
#define TRUNC_PG(x) ((x) & ~(align))

	libfile = _dl_open(libname, O_RDONLY);
	if (libfile < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	if ( _dl_fstat(libfile, &sb) < 0) {
		_dl_errno = DL_CANT_OPEN;
		return(0);
	}

	for (object = _dl_objects; object != NULL; object = object->next) {
		if (object->dev == sb.st_dev &&
		    object->inode == sb.st_ino) {
			object->obj_flags |= flags & RTLD_GLOBAL;
			_dl_close(libfile);
			if (_dl_loading_object == NULL)
				_dl_loading_object = object;
			if (object->load_object != _dl_objects &&
			    object->load_object != _dl_loading_object) {
				_dl_link_grpref(object->load_object,
				    _dl_loading_object);
			}
			return(object);
		}
	}

	_dl_read(libfile, hbuf, sizeof(hbuf));
	ehdr = (Elf_Ehdr *)hbuf;
	if (ehdr->e_ident[0] != ELFMAG0  || ehdr->e_ident[1] != ELFMAG1 ||
	    ehdr->e_ident[2] != ELFMAG2 || ehdr->e_ident[3] != ELFMAG3 ||
	    ehdr->e_type != ET_DYN || ehdr->e_machine != MACHID) {
		_dl_close(libfile);
		_dl_errno = DL_NOT_ELF;
		return(0);
	}

	/* Insertion sort */
#define LDLIST_INSERT(ld) do { \
	struct load_list **_ld; \
	for (_ld = &lowld; *_ld != NULL; _ld = &(*_ld)->next) \
		if ((*_ld)->moff > ld->moff) \
			break; \
	ld->next = *_ld; \
	*_ld = ld; \
} while (0)
	/*
	 *  Alright, we might have a winner!
	 *  Figure out how much VM space we need and set up the load
	 *  list that we'll use to find free VM space.
	 */
	phdp = (Elf_Phdr *)(hbuf + ehdr->e_phoff);
	for (i = 0; i < ehdr->e_phnum; i++, phdp++) {
		switch (phdp->p_type) {
		case PT_LOAD:
			off = (phdp->p_vaddr & align);
			size = off + phdp->p_filesz;

			ld = _dl_malloc(sizeof(struct load_list));
			ld->start = NULL;
			ld->size = size;
			ld->moff = TRUNC_PG(phdp->p_vaddr);
			ld->foff = TRUNC_PG(phdp->p_offset);
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);

			if ((ld->prot & PROT_WRITE) == 0 ||
			    ROUND_PG(size) == ROUND_PG(off + phdp->p_memsz))
				break;
			/* This phdr has a zfod section */
			ld = _dl_malloc(sizeof(struct load_list));
			ld->start = NULL;
			ld->size = ROUND_PG(off + phdp->p_memsz) -
			    ROUND_PG(size);
			ld->moff = TRUNC_PG(phdp->p_vaddr) +
			    ROUND_PG(size);
			ld->foff = -1;
			ld->prot = PFLAGS(phdp->p_flags);
			LDLIST_INSERT(ld);
			break;
		case PT_DYNAMIC:
			dynp = (Elf_Dyn *)phdp->p_vaddr;
			break;
		default:
			break;
		}
	}

#define LOFF ((Elf_Addr)lowld->start - lowld->moff)

retry:
	for (ld = lowld; ld != NULL; ld = ld->next) {
		off_t foff;
		int fd, flags;

		/*
		 * We don't want to provide the fd/off hint for anything
		 * but the first mapping, all other might have
		 * cache-incoherent aliases and will cause this code to
		 * loop forever.
		 */
		if (ld == lowld) {
			fd = libfile;
			foff = ld->foff;
			flags = 0;
		} else {
			fd = -1;
			foff = 0;
			flags = MAP_FIXED;
		}

		ld->start = (void *)(LOFF + ld->moff);

		/*
		 * Magic here.
		 * The first mquery is done with MAP_FIXED to see if
		 * the mapping we want is free. If it's not, we redo the
		 * mquery without MAP_FIXED to get the next free mapping,
		 * adjust the base mapping address to match this free mapping
		 * and restart the process again.
		 */
		ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size), ld->prot,
		    flags, fd, foff);
		if (_dl_mmap_error(ld->start)) {
			ld->start = (void *)(LOFF + ld->moff);
			ld->start = _dl_mquery(ld->start, ROUND_PG(ld->size),
			    ld->prot, flags & ~MAP_FIXED, fd, foff);
			if (_dl_mmap_error(ld->start))
				goto fail;
		}

		if (ld->start != (void *)(LOFF + ld->moff)) {
			lowld->start = ld->start - ld->moff + lowld->moff;
			goto retry;
		}
		/*
		 * XXX - we need some kind of boundary condition here,
		 * or fix mquery to not run into the stack
		 */
	}

	for (ld = lowld; ld != NULL; ld = ld->next) {
		int fd, flags;
		off_t foff;
		void *res;

		if (ld->foff < 0) {
			fd = -1;
			foff = 0;
			flags = MAP_FIXED|MAP_PRIVATE|MAP_ANON;
		} else {
			fd = libfile;
			foff = ld->foff;
			flags = MAP_FIXED|MAP_PRIVATE;
		}
		res = _dl_mmap(ld->start, ROUND_PG(ld->size), ld->prot, flags,
		    fd, foff);
		if (_dl_mmap_error(res))
			goto fail;
		/* Zero out everything past the EOF */
		if ((ld->prot & PROT_WRITE) != 0 && (ld->size & align) != 0)
			_dl_memset((char *)ld->start + ld->size, 0,
			    _dl_pagesz - (ld->size & align));
		load_end = (Elf_Addr)ld->start + ROUND_PG(ld->size);
	}

	prebind_data = prebind_load_fd(libfile, libname);

	_dl_close(libfile);

	dynp = (Elf_Dyn *)((unsigned long)dynp + LOFF);
	object = _dl_finalize_object(libname, dynp, 
	    (Elf_Phdr *)((char *)lowld->start + ehdr->e_phoff), ehdr->e_phnum,
	    type, (Elf_Addr)lowld->start, LOFF);
	if (object) {
		object->prebind_data = prebind_data;
		object->load_size = (Elf_Addr)load_end - (Elf_Addr)lowld->start;
		object->load_list = lowld;
		/* set inode, dev from stat info */
		object->dev = sb.st_dev;
		object->inode = sb.st_ino;
		object->obj_flags |= flags;
		_dl_build_sod(object->load_name, &object->sod);
	} else {
		/* XXX no point. object is never returned NULL */
		_dl_load_list_free(lowld);
	}
	return(object);
fail:
	_dl_printf("%s: rtld mmap failed mapping %s.\n",
	    _dl_progname, libname);
	_dl_close(libfile);
	_dl_errno = DL_CANT_MMAP;
	_dl_load_list_free(lowld);
	return(0);
}
Code example #12
File: dlib.c  Project: BackupTheBerlios/wl530g-svn
void *_dlopen(const char *libname, int flag)
{
	struct elf_resolve *tpnt, *tfrom;
	struct dyn_elf *rpnt = NULL;
	struct dyn_elf *dyn_chain;
	struct dyn_elf *dpnt;
	static int dl_init = 0;
	char *from;
	void (*dl_brk) (void);
#ifdef __PIC__
	int (*dl_elf_init) (void);
#endif

	/* A bit of sanity checking... */
	if (!(flag & (RTLD_LAZY|RTLD_NOW))) {
		_dl_error_number = LD_BAD_HANDLE;
		return NULL;
	}

	from = __builtin_return_address(0);

	/* Have the dynamic linker use the regular malloc function now */
	if (!dl_init) {
		dl_init++;
		_dl_malloc_function = malloc;
	}

	/* Cover the trivial case first */
	if (!libname)
		return _dl_symbol_tables;

#ifdef USE_CACHE
	_dl_map_cache();
#endif

	/*
	 * Try and locate the module we were called from - we
	 * need this so that we get the correct RPATH.  Note that
	 * this is the current behavior under Solaris, but the
	 * ABI+ specifies that we should only use the RPATH from
	 * the application.  Thus this may go away at some time
	 * in the future.
	 */
	tfrom = NULL;
	for (dpnt = _dl_symbol_tables; dpnt; dpnt = dpnt->next) {
		tpnt = dpnt->dyn;
		if (tpnt->loadaddr < from
			&& (tfrom == NULL || tfrom->loadaddr < tpnt->loadaddr))
			tfrom = tpnt;
	}

	if (!(tpnt = _dl_load_shared_library(0, &rpnt, tfrom, (char*)libname))) {
#ifdef USE_CACHE
		_dl_unmap_cache();
#endif
		return NULL;
	}
	//tpnt->libtype = loaded_file;

	dyn_chain = rpnt = (struct dyn_elf *) malloc(sizeof(struct dyn_elf));
	_dl_memset(rpnt, 0, sizeof(*rpnt));
	rpnt->dyn = tpnt;
	rpnt->flags = flag;
	if (!tpnt->symbol_scope)
		tpnt->symbol_scope = dyn_chain;

	rpnt->next_handle = _dl_handles;
	_dl_handles = rpnt;

	/*
	 * OK, we have the requested file in memory.  Now check for
	 * any other requested files that may also be required.
	 */
	{
		struct elf_resolve *tcurr;
		struct elf_resolve *tpnt1;
		Elf32_Dyn *dpnt;
		char *lpnt;

		tcurr = tpnt;
		do {
			for (dpnt = (Elf32_Dyn *) tcurr->dynamic_addr; dpnt->d_tag; dpnt++) {
				if (dpnt->d_tag == DT_NEEDED) {
					lpnt = tcurr->loadaddr + tcurr->dynamic_info[DT_STRTAB] +
						dpnt->d_un.d_val;
					if (!(tpnt1 = _dl_load_shared_library(0, &rpnt, tcurr, lpnt)))
						goto oops;

					rpnt->next = (struct dyn_elf *) malloc(sizeof(struct dyn_elf));
					_dl_memset(rpnt->next, 0, sizeof(*(rpnt->next)));
					rpnt = rpnt->next;
					if (!tpnt1->symbol_scope)
						tpnt1->symbol_scope = dyn_chain;
					rpnt->dyn = tpnt1;
				}
			}

			tcurr = tcurr->next;
		} while (tcurr);
	}

	/*
	 * OK, now attach the entire chain at the end
	 */

	rpnt->next = _dl_symbol_tables;

	/*
	 * MIPS is special *sigh*
	 */
#ifdef __mips__
	_dl_perform_mips_global_got_relocations(tpnt);
#endif

	if (do_fixup(tpnt, flag)) {
		_dl_error_number = LD_NO_SYMBOL;
		goto oops;
	}

	if (_dl_debug_addr) {
	    dl_brk = (void (*)(void)) _dl_debug_addr->r_brk;
	    if (dl_brk != NULL) {
		_dl_debug_addr->r_state = RT_ADD;
		(*dl_brk) ();

		_dl_debug_addr->r_state = RT_CONSISTENT;
		(*dl_brk) ();
	    }
	}

#ifdef __PIC__
	for (rpnt = dyn_chain; rpnt; rpnt = rpnt->next) {
		tpnt = rpnt->dyn;
		/* Apparently crt1 for the application is responsible for handling this.
		 * We only need to run the init/fini for shared libraries
		 */
		if (tpnt->libtype == program_interpreter)
			continue;
		if (tpnt->libtype == elf_executable)
			continue;
		if (tpnt->init_flag & INIT_FUNCS_CALLED)
			continue;
		tpnt->init_flag |= INIT_FUNCS_CALLED;

		if (tpnt->dynamic_info[DT_INIT]) {
			dl_elf_init = (int (*)(void)) (tpnt->loadaddr + tpnt->dynamic_info[DT_INIT]);
			(*dl_elf_init) ();
		}
		if (tpnt->dynamic_info[DT_FINI]) {
			atexit((void (*)(void)) (tpnt->loadaddr + tpnt->dynamic_info[DT_FINI]));
		}

	}
#endif

#ifdef USE_CACHE
	_dl_unmap_cache();
#endif
	return (void *) dyn_chain;

  oops:
	/* Something went wrong.  Clean up and return NULL. */
#ifdef USE_CACHE
	_dl_unmap_cache();
#endif
	do_dlclose(dyn_chain, 0);
	return NULL;
}