Example #1
static void
init (void)
{
  void *resume, *personality;
  void *handle;

  handle = __libc_dlopen (LIBGCC_S_SO);

  if (handle == NULL
      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL)
    __libc_fatal (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");

  libgcc_s_resume = resume;
  libgcc_s_personality = personality;
  atomic_write_barrier ();
  /* At the point at which any thread writes the handle
     to libgcc_s_handle, the initialization is complete.
     The writing of libgcc_s_handle is atomic. All other
     threads reading libgcc_s_handle do so atomically. Any
     thread that does not execute this function must issue
     a read barrier to ensure that all of the above has
     actually completed and that the values of the
     function pointers are correct.   */
  libgcc_s_handle = handle;
}
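The comment above only describes what a reader must do; the consuming code is not part of this example. Below is a minimal, hypothetical sketch of such a consumer, assuming libgcc_s_handle and libgcc_s_resume are declared as in glibc (a NULL-testable handle plus a matching function pointer); the wrapper name unwind_resume_stub is invented for illustration.

/* Hypothetical reader-side sketch pairing with the write barrier in
   init () above; not the actual glibc wrapper.  */
static void
unwind_resume_stub (struct _Unwind_Exception *exc)
{
  if (libgcc_s_handle == NULL)
    /* Slow path: load libgcc_s and publish the pointers.  */
    init ();
  else
    /* The handle was seen non-NULL; the read barrier pairs with
       atomic_write_barrier () so the function pointers stored before
       the handle are also visible.  */
    atomic_read_barrier ();

  void (*resume_fn) (struct _Unwind_Exception *) = libgcc_s_resume;
  resume_fn (exc);
}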
Example #2
enum nss_status
_nss_grp_create_tablename (int *errnop)
{
  if (grp_tablename_val == NULL)
    {
      const char *local_dir = nis_local_directory ();
      size_t local_dir_len = strlen (local_dir);
      static const char prefix[] = "group.org_dir.";

      char *p = malloc (sizeof (prefix) + local_dir_len);
      if (p == NULL)
	{
	  *errnop = errno;
	  return NSS_STATUS_TRYAGAIN;
	}

      memcpy (__stpcpy (p, prefix), local_dir, local_dir_len + 1);

      grp_tablename_len = sizeof (prefix) - 1 + local_dir_len;

      atomic_write_barrier ();

      if (atomic_compare_and_exchange_bool_acq (&grp_tablename_val, p, NULL))
	{
	  /* Another thread already installed the value.  */
	  free (p);
	  grp_tablename_len = strlen (grp_tablename_val);
	}
    }

  return NSS_STATUS_SUCCESS;
}
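Only the producer side appears here. As a rough sketch of the matching consumer, a reader would load grp_tablename_val once and then issue a read barrier before touching the bytes it points to or grp_tablename_len; the helper name below is hypothetical.

/* Illustrative reader sketch only; not part of the NSS module.  */
static const char *
get_grp_tablename (void)
{
  char *name = grp_tablename_val;   /* Single pointer-sized load.  */
  if (name != NULL)
    /* Pairs with atomic_write_barrier () before the CAS above, so the
       string contents and grp_tablename_len are seen fully
       initialized.  */
    atomic_read_barrier ();
  return name;
}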
Example #3
void
__init_sched_fifo_prio (void)
{
  __sched_fifo_max_prio = sched_get_priority_max (SCHED_FIFO);
  atomic_write_barrier ();
  __sched_fifo_min_prio = sched_get_priority_min (SCHED_FIFO);
}
Example #4
/* Set up NIP to run through the services.  Return nonzero if there are no
   services (left).  */
static int
setup (void **fctp, service_user **nipp)
{
  /* Remember the first service_entry, it's always the same.  */
  static bool startp_initialized;
  static service_user *startp;
  int no_more;

  if (!startp_initialized)
    {
      /* Executing this more than once at the same time must yield the
	 same result every time.  So we need no locking.  */
      no_more = __nss_netgroup_lookup (nipp, "setnetgrent", fctp);
      startp = no_more ? (service_user *) -1 : *nipp;
      PTR_MANGLE (startp);
      atomic_write_barrier ();
      startp_initialized = true;
    }
  else
    {
      service_user *nip = startp;
      PTR_DEMANGLE (nip);
      if (nip == (service_user *) -1)
	/* No services at all.  */
	return 1;

      /* Reset to the beginning of the service list.  */
      *nipp = nip;
      /* Look up the first function.  */
      no_more = __nss_lookup (nipp, "setnetgrent", NULL, fctp);
    }
  return no_more;
}
Example #5
static enum nss_status
_nss_create_tablename (int *errnop)
{
  if (tablename_val == NULL)
    {
      const char *local_dir = nis_local_directory ();
      size_t local_dir_len = strlen (local_dir);
      static const char prefix[] = "ethers.org_dir.";

      char *p = malloc (sizeof (prefix) + local_dir_len);
      if (p == NULL)
	{
	  *errnop = errno;
	  return NSS_STATUS_TRYAGAIN;
	}

      memcpy (__stpcpy (p, prefix), local_dir, local_dir_len + 1);

      tablename_len = sizeof (prefix) - 1 + local_dir_len;

      atomic_write_barrier ();

      tablename_val = p;
    }
  return NSS_STATUS_SUCCESS;
}
Example #6
static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
	       mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
Example #7
void
__attribute_noinline__
pthread_cancel_init (void)
{
  void *resume;
  void *personality;
  void *forcedunwind;
  void *getcfa;
  void *handle;

  if (__builtin_expect (libgcc_s_handle != NULL, 1))
    {
      /* Force gcc to reload all values.  */
      asm volatile ("" ::: "memory");
      return;
    }

  handle = __libc_dlopen (LIBGCC_S_SO);

  if (handle == NULL
      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL
      || (forcedunwind = __libc_dlsym (handle, "_Unwind_ForcedUnwind"))
	 == NULL
      || (getcfa = __libc_dlsym (handle, "_Unwind_GetCFA")) == NULL
#ifdef ARCH_CANCEL_INIT
      || ARCH_CANCEL_INIT (handle)
#endif
      )
    __libc_fatal (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");

  PTR_MANGLE (resume);
  libgcc_s_resume = resume;
  PTR_MANGLE (personality);
  libgcc_s_personality = personality;
  PTR_MANGLE (forcedunwind);
  libgcc_s_forcedunwind = forcedunwind;
  PTR_MANGLE (getcfa);
  libgcc_s_getcfa = getcfa;
  /* Make sure libgcc_s_handle is written last.  Otherwise,
     pthread_cancel_init might return early even when the pointer the
     caller is interested in is not initialized yet.  */
  atomic_write_barrier ();
  libgcc_s_handle = handle;
}
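The pointers published here are consumed by separate unwinding wrappers that are not shown in this example. A hedged sketch of such a caller, assuming libgcc_s_resume holds a PTR_MANGLE-protected function pointer and that PTR_DEMANGLE reverses the mangling; the wrapper name is invented.

/* Hypothetical caller sketch; the real glibc wrappers live elsewhere.  */
void
unwind_resume_wrapper (struct _Unwind_Exception *exc)
{
  /* Returns quickly once libgcc_s_handle is non-NULL; the write
     barrier above guarantees the mangled pointers were stored
     first.  */
  pthread_cancel_init ();

  void (*resume_fn) (struct _Unwind_Exception *) = libgcc_s_resume;
  PTR_DEMANGLE (resume_fn);
  resume_fn (exc);
}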
Example #8
void
gomp_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
    if (__builtin_expect ((state & 1) != 0, 0))
    {
        /* Next time we'll be awaiting TOTAL threads again.  */
        bar->awaited = bar->total;
        atomic_write_barrier ();
        bar->generation += 4;
        futex_wake ((int *) &bar->generation, INT_MAX);
    }
    else
    {
        unsigned int generation = state;

        do
            do_wait ((int *) &bar->generation, generation);
        while (bar->generation == generation);
    }
}
Example #9
void
gomp_team_barrier_wait_end (gomp_barrier_t *bar, gomp_barrier_state_t state)
{
    unsigned int generation;

    if (__builtin_expect ((state & 1) != 0, 0))
    {
        /* Next time we'll be awaiting TOTAL threads again.  */
        struct gomp_thread *thr = gomp_thread ();
        struct gomp_team *team = thr->ts.team;
        bar->awaited = bar->total;
        atomic_write_barrier ();
        if (__builtin_expect (team->task_count, 0))
        {
            gomp_barrier_handle_tasks (state);
            state &= ~1;
        }
        else
        {
            bar->generation = state + 3;
            futex_wake ((int *) &bar->generation, INT_MAX);
            return;
        }
    }

    generation = state;
    do
    {
        do_wait ((int *) &bar->generation, generation);
        if (__builtin_expect (bar->generation & 1, 0))
            gomp_barrier_handle_tasks (state);
        if ((bar->generation & 2))
            generation |= 2;
    }
    while (bar->generation != state + 4);
}
Example #10
void
qsort_r (void *b, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
{
  size_t size = n * s;
  char *tmp = NULL;
  struct msort_param p;

  /* For large object sizes use indirect sorting.  */
  if (s > 32)
    size = 2 * n * sizeof (void *) + s;

  if (size < 1024)
    /* The temporary array is small, so put it on the stack.  */
    p.t = __alloca (size);
  else
    {
      /* We should avoid allocating too much memory since this might
	 have to be backed up by swap space.  */
      static long int phys_pages;
      static int pagesize;

      if (pagesize == 0)
	{
	  phys_pages = __sysconf (_SC_PHYS_PAGES);

	  if (phys_pages == -1)
	    /* Error while determining the memory size.  So let's
	       assume there is enough memory.  Otherwise the
	       implementer should provide a complete implementation of
	       the `sysconf' function.  */
	    phys_pages = (long int) (~0ul >> 1);

	  /* The following determines that we will never use more than
	     a quarter of the physical memory.  */
	  phys_pages /= 4;

          /* Make sure phys_pages is written to memory.  */
          atomic_write_barrier ();

	  pagesize = __sysconf (_SC_PAGESIZE);
	}

      /* Just a comment here.  We cannot compute
	   phys_pages * pagesize
	   and compare the needed amount of memory against this value.
	   The problem is that some systems might have more physical
	   memory than can be represented with a `size_t' value (when
	   measured in bytes).  */

      /* If the memory requirements are too high don't allocate memory.  */
      if (size / pagesize > (size_t) phys_pages)
	{
	  _quicksort (b, n, s, cmp, arg);
	  return;
	}

      /* It's somewhat large, so malloc it.  */
      int save = errno;
      tmp = malloc (size);
      __set_errno (save);
      if (tmp == NULL)
	{
	  /* Couldn't get space, so use the slower algorithm
	     that doesn't need a temporary array.  */
	  _quicksort (b, n, s, cmp, arg);
	  return;
	}
      p.t = tmp;
    }
Example #11
void
__unregister_atfork (
     void *dso_handle)
{
  /* Check whether there is any entry in the list which we have to
     remove.  It is likely that this is not the case so don't bother
     getting the lock.

     We do not worry about other threads adding entries for this DSO
     right this moment.  If this happens this is a race and we can do
     whatever we please.  The program will crash anyway.  */
  struct fork_handler *runp = __fork_handlers;
  struct fork_handler *lastp = NULL;

  while (runp != NULL)
    if (runp->dso_handle == dso_handle)
      break;
    else
      {
	lastp = runp;
	runp = runp->next;
      }

  if (runp == NULL)
    /* Nothing to do.  */
    return;

  /* Get the lock to not conflict with additions or deletions.  Note
     that there couldn't have been another thread deleting something.
     The __unregister_atfork function is only called from the
     dlclose() code which itself serializes the operations.  */
  lll_lock (__fork_lock, LLL_PRIVATE);

  /* We have to create a new list with all the entries we don't remove.  */
  struct deleted_handler
  {
    struct fork_handler *handler;
    struct deleted_handler *next;
  } *deleted = NULL;

  /* Remove the entries for the DSO which is being unloaded from the
     list.  It's a singly linked list, so concurrent readers can keep
     traversing it while entries are unlinked.  */
  do
    {
    again:
      if (runp->dso_handle == dso_handle)
	{
	  if (lastp == NULL)
	    {
	      /* We have to use an atomic operation here because
		 __linkin_atfork also uses one.  */
	      if (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
							 runp->next, runp)
		  != 0)
		{
		  runp = __fork_handlers;
		  goto again;
		}
	    }
	  else
	    lastp->next = runp->next;

	  /* We cannot overwrite the ->next element now.  Put the deleted
	     entries in a separate list.  */
	  struct deleted_handler *newp = alloca (sizeof (*newp));
	  newp->handler = runp;
	  newp->next = deleted;
	  deleted = newp;
	}
      else
	lastp = runp;

      runp = runp->next;
    }
  while (runp != NULL);

  /* Release the lock.  */
  lll_unlock (__fork_lock, LLL_PRIVATE);

  /* Walk the list of all entries which have to be deleted.  */
  while (deleted != NULL)
    {
      /* Ask any current users to signal us when they are done.  */
      deleted->handler->need_signal = 1;
      /* Make sure this gets written out first.  */
      atomic_write_barrier ();

      /* Decrement the reference counter.  If it does not reach zero
	 wait for the last user.  */
      atomic_decrement (&deleted->handler->refcntr);
      unsigned int val;
      while ((val = deleted->handler->refcntr) != 0)
	lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);

      deleted = deleted->next;
    }
}
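The other half of this handshake, the thread that actually runs a fork handler, is not shown. Loosely modelled on that path, a user might drop its reference as in the sketch below (the function name is invented; atomic_decrement_and_test and lll_futex_wake are the glibc primitives this code already relies on).

/* Hypothetical sketch of the reference-dropping side.  */
static void
release_fork_handler (struct fork_handler *runp)
{
  /* The write barrier in __unregister_atfork makes need_signal
     visible before that thread starts waiting on refcntr.  */
  if (atomic_decrement_and_test (&runp->refcntr)
      && runp->need_signal)
    /* Last user is gone: wake the thread blocked in
       __unregister_atfork.  */
    lll_futex_wake (&runp->refcntr, 1, LLL_PRIVATE);
}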
Example #12
int
__get_nprocs (void)
{
  static int cached_result = -1;
  static time_t timestamp;

  time_t now = time (NULL);
  time_t prev = timestamp;
  atomic_read_barrier ();
  if (now == prev && cached_result > -1)
    return cached_result;

  /* XXX Here will come a test for the new system call.  */

  const size_t buffer_size = __libc_use_alloca (8192) ? 8192 : 512;
  char *buffer = alloca (buffer_size);
  char *buffer_end = buffer + buffer_size;
  char *cp = buffer_end;
  char *re = buffer_end;

  const int flags = O_RDONLY | O_CLOEXEC;
  int fd = __open_nocancel ("/sys/devices/system/cpu/online", flags);
  char *l;
  int result = 0;
  if (fd != -1)
    {
      l = next_line (fd, buffer, &cp, &re, buffer_end);
      if (l != NULL)
	do
	  {
	    char *endp;
	    unsigned long int n = strtoul (l, &endp, 10);
	    if (l == endp)
	      {
		result = 0;
		break;
	      }

	    unsigned long int m = n;
	    if (*endp == '-')
	      {
		l = endp + 1;
		m = strtoul (l, &endp, 10);
		if (l == endp)
		  {
		    result = 0;
		    break;
		  }
	      }

	    result += m - n + 1;

	    l = endp;
	    while (l < re && isspace (*l))
	      ++l;
	  }
	while (l < re);

      __close_nocancel_nostatus (fd);

      if (result > 0)
	goto out;
    }

  cp = buffer_end;
  re = buffer_end;

  /* Default to an SMP system in case we cannot obtain an accurate
     number.  */
  result = 2;

  /* The /proc/stat format is more uniform, use it by default.  */
  fd = __open_nocancel ("/proc/stat", flags);
  if (fd != -1)
    {
      result = 0;

      while ((l = next_line (fd, buffer, &cp, &re, buffer_end)) != NULL)
	/* The current format of /proc/stat has all the cpu* entries
	   at the front.  We assume here that it stays this way.  */
	if (strncmp (l, "cpu", 3) != 0)
	  break;
	else if (isdigit (l[3]))
	  ++result;

      __close_nocancel_nostatus (fd);
    }
  else
    {
      fd = __open_nocancel ("/proc/cpuinfo", flags);
      if (fd != -1)
	{
	  GET_NPROCS_PARSER (fd, buffer, cp, re, buffer_end, result);
	  __close_nocancel_nostatus (fd);
	}
    }

 out:
  cached_result = result;
  atomic_write_barrier ();
  timestamp = now;

  return result;
}
Example #13
void
_dl_map_object_deps (struct link_map *map,
		     struct link_map **preloads, unsigned int npreloads,
		     int trace_mode, int open_mode)
{
  struct list *known = __alloca (sizeof *known * (1 + npreloads + 1));
  struct list *runp, *tail;
  unsigned int nlist, i;
  /* Object name.  */
  const char *name;
  int errno_saved;
  int errno_reason;
  struct dl_exception exception;

  /* No loaded object so far.  */
  nlist = 0;

  /* First load MAP itself.  */
  preload (known, &nlist, map);

  /* Add the preloaded items after MAP but before any of its dependencies.  */
  for (i = 0; i < npreloads; ++i)
    preload (known, &nlist, preloads[i]);

  /* Terminate the lists.  */
  known[nlist - 1].next = NULL;

  /* Pointer to last unique object.  */
  tail = &known[nlist - 1];

  struct scratch_buffer needed_space;
  scratch_buffer_init (&needed_space);

  /* Process each element of the search list, loading each of its
     auxiliary objects and immediate dependencies.  Auxiliary objects
     will be added in the list before the object itself and
     dependencies will be appended to the list as we step through it.
     This produces a flat, ordered list that represents a
     breadth-first search of the dependency tree.

     The whole process is complicated by the fact that we had better
     use alloca for the temporary list elements.  But using
     alloca means we cannot use recursive function calls.  */
  errno_saved = errno;
  errno_reason = 0;
  errno = 0;
  name = NULL;
  for (runp = known; runp; )
    {
      struct link_map *l = runp->map;
      struct link_map **needed = NULL;
      unsigned int nneeded = 0;

      /* Unless otherwise stated, this object is handled.  */
      runp->done = 1;

      /* Allocate a temporary record to contain the references to the
	 dependencies of this object.  */
      if (l->l_searchlist.r_list == NULL && l->l_initfini == NULL
	  && l != map && l->l_ldnum > 0)
	{
	  /* l->l_ldnum includes space for the terminating NULL.  */
	  if (!scratch_buffer_set_array_size
	      (&needed_space, l->l_ldnum, sizeof (struct link_map *)))
	    _dl_signal_error (ENOMEM, map->l_name, NULL,
			      N_("cannot allocate dependency buffer"));
	  needed = needed_space.data;
	}

      if (l->l_info[DT_NEEDED] || l->l_info[AUXTAG] || l->l_info[FILTERTAG])
	{
	  const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]);
	  struct openaux_args args;
	  struct list *orig;
	  const ElfW(Dyn) *d;

	  args.strtab = strtab;
	  args.map = l;
	  args.trace_mode = trace_mode;
	  args.open_mode = open_mode;
	  orig = runp;

	  for (d = l->l_ld; d->d_tag != DT_NULL; ++d)
	    if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED)
	      {
		/* Map in the needed object.  */
		struct link_map *dep;

		/* Recognize DSTs.  */
		name = expand_dst (l, strtab + d->d_un.d_val, 0);
		/* Store the tag in the argument structure.  */
		args.name = name;

		int err = _dl_catch_exception (&exception, openaux, &args);
		if (__glibc_unlikely (exception.errstring != NULL))
		  {
		    if (err)
		      errno_reason = err;
		    else
		      errno_reason = -1;
		    goto out;
		  }
		else
		  dep = args.aux;

		if (! dep->l_reserved)
		  {
		    /* Allocate new entry.  */
		    struct list *newp;

		    newp = alloca (sizeof (struct list));

		    /* Append DEP to the list.  */
		    newp->map = dep;
		    newp->done = 0;
		    newp->next = NULL;
		    tail->next = newp;
		    tail = newp;
		    ++nlist;
		    /* Set the mark bit that says it's already in the list.  */
		    dep->l_reserved = 1;
		  }

		/* Remember this dependency.  */
		if (needed != NULL)
		  needed[nneeded++] = dep;
	      }
	    else if (d->d_tag == DT_AUXILIARY || d->d_tag == DT_FILTER)
	      {
		struct list *newp;

		/* Recognize DSTs.  */
		name = expand_dst (l, strtab + d->d_un.d_val,
				   d->d_tag == DT_AUXILIARY);
		/* Store the tag in the argument structure.  */
		args.name = name;

		/* Say that we are about to load an auxiliary library.  */
		if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS,
				      0))
		  _dl_debug_printf ("load auxiliary object=%s"
				    " requested by file=%s\n",
				    name,
				    DSO_FILENAME (l->l_name));

		/* We must be prepared that the addressed shared
		   object is not available.  For filter objects the dependency
		   must be available.  */
		int err = _dl_catch_exception (&exception, openaux, &args);
		if (__glibc_unlikely (exception.errstring != NULL))
		  {
		    if (d->d_tag == DT_AUXILIARY)
		      {
			/* We are not interested in the error message.  */
			_dl_exception_free (&exception);
			/* Simply ignore this error and continue the work.  */
			continue;
		      }
		    else
		      {
			if (err)
			  errno_reason = err;
			else
			  errno_reason = -1;
			goto out;
		      }
		  }

		/* The auxiliary object is actually available.
		   Incorporate the map in all the lists.  */

		/* Allocate new entry.  This always has to be done.  */
		newp = alloca (sizeof (struct list));

		/* We want to insert the new map before the current one,
		   but we have no back links.  So we copy the contents of
		   the current entry over.  Note that ORIG and NEWP now
		   have switched their meanings.  */
		memcpy (newp, orig, sizeof (*newp));

		/* Initialize new entry.  */
		orig->done = 0;
		orig->map = args.aux;

		/* Remember this dependency.  */
		if (needed != NULL)
		  needed[nneeded++] = args.aux;

		/* We must handle two situations here: the map is new,
		   so we must add it in all three lists.  If the map
		   is already known, we have two further possibilities:
		   - if the object is before the current map in the
		   search list, we do nothing.  It is already found
		   early enough.
		   - if the object is after the current one, we must
		   move it just before the current map to make sure
		   the symbols are found early enough
		*/
		if (args.aux->l_reserved)
		  {
		    /* The object is already somewhere in the list.
		       Locate it first.  */
		    struct list *late;

		    /* This object is already in the search list we
		       are building.  Don't add a duplicate pointer.
		       Just added by _dl_map_object.  */
		    for (late = newp; late->next != NULL; late = late->next)
		      if (late->next->map == args.aux)
			break;

		    if (late->next != NULL)
		      {
			/* The object is somewhere behind the current
			   position in the search path.  We have to
			   move it to this earlier position.  */
			orig->next = newp;

			/* Now remove the later entry from the list
			   and adjust the tail pointer.  */
			if (tail == late->next)
			  tail = late;
			late->next = late->next->next;

			/* We must move the object earlier in the chain.  */
			if (args.aux->l_prev != NULL)
			  args.aux->l_prev->l_next = args.aux->l_next;
			if (args.aux->l_next != NULL)
			  args.aux->l_next->l_prev = args.aux->l_prev;

			args.aux->l_prev = newp->map->l_prev;
			newp->map->l_prev = args.aux;
			if (args.aux->l_prev != NULL)
			  args.aux->l_prev->l_next = args.aux;
			args.aux->l_next = newp->map;
		      }
		    else
		      {
			/* The object must be somewhere earlier in the
			   list.  Undo to the current list element what
			   we did above.  */
			memcpy (orig, newp, sizeof (*newp));
			continue;
		      }
		  }
		else
		  {
		    /* This is easy.  We just add the symbol right here.  */
		    orig->next = newp;
		    ++nlist;
		    /* Set the mark bit that says it's already in the list.  */
		    args.aux->l_reserved = 1;

		    /* The only problem is that in the doubly linked
		       list of all objects we don't have this new
		       object at the correct place.  Correct this here.  */
		    if (args.aux->l_prev)
		      args.aux->l_prev->l_next = args.aux->l_next;
		    if (args.aux->l_next)
		      args.aux->l_next->l_prev = args.aux->l_prev;

		    args.aux->l_prev = newp->map->l_prev;
		    newp->map->l_prev = args.aux;
		    if (args.aux->l_prev != NULL)
		      args.aux->l_prev->l_next = args.aux;
		    args.aux->l_next = newp->map;
		  }

		/* Move the tail pointer if necessary.  */
		if (orig == tail)
		  tail = newp;

		/* Move on the insert point.  */
		orig = newp;
	      }
	}

      /* Terminate the list of dependencies and store the array address.  */
      if (needed != NULL)
	{
	  needed[nneeded++] = NULL;

	  struct link_map **l_initfini = (struct link_map **)
	    malloc ((2 * nneeded + 1) * sizeof needed[0]);
	  if (l_initfini == NULL)
	    {
	      scratch_buffer_free (&needed_space);
	      _dl_signal_error (ENOMEM, map->l_name, NULL,
				N_("cannot allocate dependency list"));
	    }
	  l_initfini[0] = l;
	  memcpy (&l_initfini[1], needed, nneeded * sizeof needed[0]);
	  memcpy (&l_initfini[nneeded + 1], l_initfini,
		  nneeded * sizeof needed[0]);
	  atomic_write_barrier ();
	  l->l_initfini = l_initfini;
	  l->l_free_initfini = 1;
	}

      /* If we have no auxiliary objects just go on to the next map.  */
      if (runp->done)
	do
	  runp = runp->next;
	while (runp != NULL && runp->done);
    }

 out:
  scratch_buffer_free (&needed_space);

  if (errno == 0 && errno_saved != 0)
    __set_errno (errno_saved);

  struct link_map **old_l_initfini = NULL;
  if (map->l_initfini != NULL && map->l_type == lt_loaded)
    {
      /* This object was previously loaded as a dependency and we have
	 a separate l_initfini list.  We don't need it anymore.  */
      assert (map->l_searchlist.r_list == NULL);
      old_l_initfini = map->l_initfini;
    }

  /* Store the search list we built in the object.  It will be used for
     searches in the scope of this object.  */
  struct link_map **l_initfini =
    (struct link_map **) malloc ((2 * nlist + 1)
				 * sizeof (struct link_map *));
  if (l_initfini == NULL)
    _dl_signal_error (ENOMEM, map->l_name, NULL,
		      N_("cannot allocate symbol search list"));


  map->l_searchlist.r_list = &l_initfini[nlist + 1];
  map->l_searchlist.r_nlist = nlist;

  for (nlist = 0, runp = known; runp; runp = runp->next)
    {
      if (__builtin_expect (trace_mode, 0) && runp->map->l_faked)
	/* This can happen when we trace the loading.  */
	--map->l_searchlist.r_nlist;
      else
	map->l_searchlist.r_list[nlist++] = runp->map;

      /* Now clear all the mark bits we set in the objects on the search list
	 to avoid duplicates, so the next call starts fresh.  */
      runp->map->l_reserved = 0;
    }

  if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) != 0
      && map == GL(dl_ns)[LM_ID_BASE]._ns_loaded)
    {
      /* If we are to compute conflicts, we have to build local scope
	 for each library, not just the ultimate loader.  */
      for (i = 0; i < nlist; ++i)
	{
	  struct link_map *l = map->l_searchlist.r_list[i];
	  unsigned int j, cnt;

	  /* The local scope has been already computed.  */
	  if (l == map
	      || (l->l_local_scope[0]
		  && l->l_local_scope[0]->r_nlist != 0))
	    continue;

	  if (l->l_info[AUXTAG] || l->l_info[FILTERTAG])
	    {
	      /* As current DT_AUXILIARY/DT_FILTER implementation needs to be
		 rewritten, no need to bother with prelinking the old
		 implementation.  */
	      _dl_signal_error (EINVAL, l->l_name, NULL, N_("\
Filters not supported with LD_TRACE_PRELINKING"));
	    }

	  cnt = _dl_build_local_scope (l_initfini, l);
	  assert (cnt <= nlist);
	  for (j = 0; j < cnt; j++)
	    {
	      l_initfini[j]->l_reserved = 0;
	      if (j && __builtin_expect (l_initfini[j]->l_info[DT_SYMBOLIC]
					 != NULL, 0))
		l->l_symbolic_in_local_scope = true;
	    }

	  l->l_local_scope[0] =
	    (struct r_scope_elem *) malloc (sizeof (struct r_scope_elem)
					    + (cnt
					       * sizeof (struct link_map *)));
	  if (l->l_local_scope[0] == NULL)
	    _dl_signal_error (ENOMEM, map->l_name, NULL,
			      N_("cannot allocate symbol search list"));
	  l->l_local_scope[0]->r_nlist = cnt;
	  l->l_local_scope[0]->r_list =
	    (struct link_map **) (l->l_local_scope[0] + 1);
	  memcpy (l->l_local_scope[0]->r_list, l_initfini,
		  cnt * sizeof (struct link_map *));
	}
    }
Example #14
void membar_producer (void)
{
  atomic_write_barrier ();
}
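Only the producer side of this Solaris-style barrier pair is shown. If the same mapping is used for the consumer side, the counterpart would presumably look like the following (an assumption, not code from the source):

/* Assumed counterpart; only membar_producer appears above.  */
void membar_consumer (void)
{
  atomic_read_barrier ();
}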
Example #15
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
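The FIXME comment refers to a reader (reused_arena) that walks main_arena.next without taking list_lock. Below is a simplified, hypothetical sketch of such a traversal, relying on the write barrier above so that a newly published arena is fully initialized by the time it becomes reachable; the real selection logic of reused_arena is omitted and the function name is invented.

/* Simplified reader sketch of the lock-free list walk; not the full
   reused_arena implementation.  */
static mstate
pick_unlocked_arena (mstate avoid_arena)
{
  mstate a = main_arena.next;   /* May observe a just-published arena.  */
  while (a != &main_arena)
    {
      /* main_arena.next was stored only after atomic_write_barrier (),
         so every field of *a written in _int_new_arena is visible.  */
      if (a != avoid_arena && __libc_lock_trylock (a->mutex) == 0)
        return a;
      a = a->next;
    }
  return NULL;
}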
Example #16
static struct malloc_arena*
arena_get2(struct malloc_arena* a_tsd, size_t size)
{
  struct malloc_arena* a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (void *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena.  */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list.  */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}