/* Unlock RWLOCK.  */
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
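  /* The low-level lock serializes all updates to the rwlock's bookkeeping
     fields (__writer, __nr_readers and the wait-queue counters).  */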
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
  if (rwlock->__data.__writer)
    rwlock->__data.__writer = 0;
  else
    --rwlock->__data.__nr_readers;
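  /* Once no reader is left, the unlocking thread must wake either one
     queued writer or all queued readers.  */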
  if (rwlock->__data.__nr_readers == 0)
    {
      if (rwlock->__data.__nr_writers_queued)
	{
	  ++rwlock->__data.__writer_wakeup;
	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
	  lll_futex_wake (&rwlock->__data.__writer_wakeup, 1,
			  rwlock->__data.__shared);
	  return 0;
	}
      else if (rwlock->__data.__nr_readers_queued)
	{
	  ++rwlock->__data.__readers_wakeup;
	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
	  lll_futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
			  rwlock->__data.__shared);
	  return 0;
	}
    }
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
  return 0;
}
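All of the examples collected here wrap a short critical section in glibc's internal low-level lock.  A minimal sketch of that shared pattern follows; it assumes the NPTL-internal <lowlevellock.h> macros (lll_lock, lll_unlock, LLL_LOCK_INITIALIZER, LLL_PRIVATE), and the variable and function names are invented for illustration.

static int state_lock = LLL_LOCK_INITIALIZER;	/* plain int futex word */
static unsigned int counter;			/* state guarded by state_lock */

static void
bump_counter (void)
{
  /* Acquire: returns once the lock word has been taken, futex-waiting if
     another thread currently holds it.  */
  lll_lock (state_lock, LLL_PRIVATE);

  ++counter;

  /* Release: clears the lock word and wakes one waiter if there is any.  */
  lll_unlock (state_lock, LLL_PRIVATE);
}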
Example #2
int
pthread_getattr_default_np (pthread_attr_t *out)
{
  struct pthread_attr *real_out;

  real_out = (struct pthread_attr *) out;
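  /* __default_pthread_attr is shared, mutable state; take its lock so the
     copy below is a consistent snapshot.  */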

  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  *real_out = __default_pthread_attr;
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

  return 0;
}
Example #3
int
attribute_protected
__pthread_getschedparam (
     pthread_t threadid,
     int *policy,
     struct sched_param *param)
{
  struct pthread *pd = (struct pthread *) threadid;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  int result = 0;

  lll_lock (pd->lock, LLL_PRIVATE);

  /* The library is responsible for maintaining the values at all
     times.  If the user uses an interface other than
     pthread_setschedparam to modify the scheduler setting it is not
     the library's problem.  In case the descriptor's values have
     not yet been retrieved do it now.  */
  if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (sched_getparam (pd->tid, &pd->schedparam) != 0)
	result = 1;
      else
	pd->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      pd->schedpolicy = sched_getscheduler (pd->tid);
      if (pd->schedpolicy == -1)
	result = 1;
      else
	pd->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      *policy = pd->schedpolicy;
      memcpy (param, &pd->schedparam, sizeof (struct sched_param));
    }

  lll_unlock (pd->lock, LLL_PRIVATE);

  return result;
}
Example #4
int
__pthread_cond_signal (pthread_cond_t *cond)
{
  int pshared = (cond->__data.__mutex == (void *) ~0l)
		? LLL_SHARED : LLL_PRIVATE;

  LIBC_PROBE (cond_signal, 1, cond);

  /* Make sure we are alone.  */
  lll_lock (cond->__data.__lock, pshared);

  /* Are there any waiters to be woken?  */
  if (cond->__data.__total_seq > cond->__data.__wakeup_seq)
    {
      /* Yes.  Mark one of them as woken.  */
      ++cond->__data.__wakeup_seq;
      ++cond->__data.__futex;

#if (defined lll_futex_cmp_requeue_pi \
     && defined __ASSUME_REQUEUE_PI)
      pthread_mutex_t *mut = cond->__data.__mutex;

      if (USE_REQUEUE_PI (mut)
	/* This can only really fail with ENOSYS, since nobody can modify
	   futex while we have the cond_lock.  */
	  && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0,
				       &mut->__data.__lock,
				       cond->__data.__futex, pshared) == 0)
	{
	  lll_unlock (cond->__data.__lock, pshared);
	  return 0;
	}
      else
#endif
	/* Wake one.  */
	if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex,
						       1, 1,
						       &cond->__data.__lock,
						       pshared), 0))
	  return 0;

      /* Fallback if neither of them work.  */
      lll_futex_wake (&cond->__data.__futex, 1, pshared);
    }

  /* We are done.  */
  lll_unlock (cond->__data.__lock, pshared);

  return 0;
}
Example #5
int
__pthread_current_priority (void)
{
  struct pthread *self = THREAD_SELF;
  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
    return self->schedparam.sched_priority;

  int result = 0;
#ifdef TPP_PTHREAD_SCHED
  int policy;
  struct sched_param param;
#endif
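  /* The scheduling data cached in the thread descriptor is protected by
     the descriptor's lock.  */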

  lll_lock (self->lock, LLL_PRIVATE);

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
#ifndef TPP_PTHREAD_SCHED
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
#else
      if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0)
#endif
	result = -1;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
#ifndef TPP_PTHREAD_SCHED
      self->schedpolicy = __sched_getscheduler (self->tid);
#else
      if (__pthread_getschedparam (self->tid, &self->schedpolicy, &param) != 0)
        self->schedpolicy = -1;
#endif
      if (self->schedpolicy == -1)
	result = -1;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result != -1)
    result = self->schedparam.sched_priority;

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
Example #6
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;
  bool wake = false;
  int futex_shared =
      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;

  if (ELIDE_TRYLOCK (rwlock->__data.__rwelision,
		     rwlock->__data.__lock == 0
		     && rwlock->__data.__nr_readers == 0
		     && rwlock->__data.__writer == 0, 0))
    return 0;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
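  /* With the internal lock held, a read lock may be granted only when no
     writer holds the rwlock and either no writer is queued or readers are
     preferred.  */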

  if (rwlock->__data.__writer == 0
      && (rwlock->__data.__nr_writers_queued == 0
	  || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
	{
	  --rwlock->__data.__nr_readers;
	  result = EAGAIN;
	}
      else
	{
	  result = 0;
	  /* See pthread_rwlock_rdlock.  */
	  if (rwlock->__data.__nr_readers == 1
	      && rwlock->__data.__nr_readers_queued > 0
	      && rwlock->__data.__nr_writers_queued > 0)
	    {
	      ++rwlock->__data.__readers_wakeup;
	      wake = true;
	    }
	}
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  if (wake)
    futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared);

  return result;
}
Example #7
int
__pthread_rwlock_trywrlock (
     pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
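  /* The write lock can be taken only when there is neither a writer nor
     any reader.  */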

  if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
    {
      rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid);
      result = 0;
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
Example #8
/* Unlock RWLOCK.  */
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  int futex_shared =
      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;

  LIBC_PROBE (rwlock_unlock, 1, rwlock);

  if (ELIDE_UNLOCK (rwlock->__data.__writer == 0
		    && rwlock->__data.__nr_readers == 0))
    return 0;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
  if (rwlock->__data.__writer)
    rwlock->__data.__writer = 0;
  else
    --rwlock->__data.__nr_readers;
  /* If there are still readers present, we do not yet need to wake writers
     nor are responsible to wake any readers.  */
  if (rwlock->__data.__nr_readers == 0)
    {
      /* Note that if there is a blocked writer, we effectively make it
	 responsible for waking any readers because we don't wake readers in
	 this case.  */
      if (rwlock->__data.__nr_writers_queued)
	{
	  ++rwlock->__data.__writer_wakeup;
	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
	  futex_wake (&rwlock->__data.__writer_wakeup, 1, futex_shared);
	  return 0;
	}
      else if (rwlock->__data.__nr_readers_queued)
	{
	  ++rwlock->__data.__readers_wakeup;
	  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
	  futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX,
		      futex_shared);
	  return 0;
	}
    }
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);
  return 0;
}
Example #9
int
__pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
  int result = 0;

  LIBC_PROBE (rdlock_entry, 1, rwlock);

  if (ELIDE_LOCK (rwlock->__data.__rwelision,
		  rwlock->__data.__lock == 0
		  && rwlock->__data.__writer == 0
		  && rwlock->__data.__nr_readers == 0))
    return 0;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  /* Get the rwlock if there is no writer...  */
  if (rwlock->__data.__writer == 0
      /* ...and if either no writer is waiting or we prefer readers.  */
      && (!rwlock->__data.__nr_writers_queued
	  || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      /* Increment the reader counter.  Avoid overflow.  */
      if (__glibc_unlikely (++rwlock->__data.__nr_readers == 0))
	{
	  /* Overflow on number of readers.	 */
	  --rwlock->__data.__nr_readers;
	  result = EAGAIN;
	}
      else
	LIBC_PROBE (rdlock_acquire_read, 1, rwlock);

      /* We are done, free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      return result;
    }

  return __pthread_rwlock_rdlock_slow (rwlock);
}
Example #10
/*
 * OSMemoryZone
 */
OSMemoryZone* _OSGetFirstAvailableMemoryZone(void) {
	lll_lock(&malloc_lock);

	OSMemoryZone* osm = NULL;
	for (int i = 0; i < MAX_ZONES; i++) {
		OSMemoryZone* tosm = &zones[i];
		if (tosm->registred == FALSE) {
			/* Stop at the first unregistered zone.  */
			osm = tosm;
			break;
		}
	}

	if (osm != NULL) {
		osm->registred = TRUE;
	}
	else {
		OSHalt("out of avail malloc zones (max: %d)", MAX_ZONES);
	}

	lll_unlock(&malloc_lock);

	return (OSMemoryZone*)osm;
}
Example #11
int
pthread_setschedprio (
    pthread_t threadid,
    int prio)
{
    struct pthread *pd = (struct pthread *) threadid;

    /* Make sure the descriptor is valid.  */
    if (INVALID_TD_P (pd))
        /* Not a valid thread handle.  */
        return ESRCH;

    int result = 0;
    struct sched_param param;
    param.sched_priority = prio;

    lll_lock (pd->lock, LLL_PRIVATE);

    /* If the thread should have higher priority because of some
       PTHREAD_PRIO_PROTECT mutexes it holds, adjust the priority.  */
    if (__builtin_expect (pd->tpp != NULL, 0) && pd->tpp->priomax > prio)
        param.sched_priority = pd->tpp->priomax;

    /* Try to set the scheduler information.  */
    if (__builtin_expect (sched_setparam (pd->tid, &param) == -1, 0))
        result = errno;
    else
    {
        /* We succeeded changing the kernel information.  Reflect this
        change in the thread descriptor.  */
        param.sched_priority = prio;
        memcpy (&pd->schedparam, &param, sizeof (struct sched_param));
        pd->flags |= ATTR_FLAG_SCHED_SET;
    }

    lll_unlock (pd->lock, LLL_PRIVATE);

    return result;
}
Example #12
int
sem_close (sem_t *sem)
{
  int result = 0;

  /* Get the lock.  */
  lll_lock (__sem_mappings_lock, LLL_PRIVATE);

  /* Locate the entry for the mapping the caller provided.  */
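  /* REC and THE_SEM are file-scope variables that parametrize the WALKER
     callback passed to __twalk; holding __sem_mappings_lock makes their
     use safe.  */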
  rec = NULL;
  the_sem = sem;
  __twalk (__sem_mappings, walker);
  if  (rec != NULL)
    {
      /* Check the reference counter.  If it is going to be zero, free
	 all the resources.  */
      if (--rec->refcnt == 0)
	{
	  /* Remove the record from the tree.  */
	  (void) __tdelete (rec, &__sem_mappings, __sem_search);

	  result = munmap (rec->sem, sizeof (sem_t));

	  free (rec);
	}
    }
  else
    {
      /* This is no valid semaphore.  */
      result = -1;
      __set_errno (EINVAL);
    }

  /* Release the lock.  */
  lll_unlock (__sem_mappings_lock, LLL_PRIVATE);

  return result;
}
Example #13
File: tpp.c  Project: siddhesh/glibc
int
__pthread_current_priority (void)
{
  struct pthread *self = THREAD_SELF;
  if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
      == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET))
    return self->schedparam.sched_priority;

  int result = 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (self->lock, LLL_PRIVATE);

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
	result = -1;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
	result = -1;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result != -1)
    result = self->schedparam.sched_priority;

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
Example #14
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  int result = EBUSY;

  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);
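  /* Grant the read lock only when no writer holds the rwlock and either no
     writer is queued or readers are preferred.  */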

  if (rwlock->__data.__writer == 0
      && (rwlock->__data.__nr_writers_queued == 0
	  || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
    {
      if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
	{
	  --rwlock->__data.__nr_readers;
	  result = EAGAIN;
	}
      else
	result = 0;
    }

  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
Example #15
void
__unregister_atfork (
     void *dso_handle)
{
  /* Check whether there is any entry in the list which we have to
     remove.  It is likely that this is not the case so don't bother
     getting the lock.

     We do not worry about other threads adding entries for this DSO
     right this moment.  If this happens this is a race and we can do
     whatever we please.  The program will crash soon anyway.  */
  struct fork_handler *runp = __fork_handlers;
  struct fork_handler *lastp = NULL;

  while (runp != NULL)
    if (runp->dso_handle == dso_handle)
      break;
    else
      {
	lastp = runp;
	runp = runp->next;
      }

  if (runp == NULL)
    /* Nothing to do.  */
    return;

  /* Get the lock to not conflict with additions or deletions.  Note
     that there couldn't have been another thread deleting something.
     The __unregister_atfork function is only called from the
     dlclose() code which itself serializes the operations.  */
  lll_lock (__fork_lock, LLL_PRIVATE);

  /* We have to create a new list with all the entries we don't remove.  */
  struct deleted_handler
  {
    struct fork_handler *handler;
    struct deleted_handler *next;
  } *deleted = NULL;

  /* Remove the entries for the DSO which is being unloaded from the list.
     It's a singly linked list, so concurrent readers can keep traversing it
     while we unlink entries.  */
  do
    {
    again:
      if (runp->dso_handle == dso_handle)
	{
	  if (lastp == NULL)
	    {
	      /* We have to use an atomic operation here because
		 __linkin_atfork also uses one.  */
	      if (catomic_compare_and_exchange_bool_acq (&__fork_handlers,
							 runp->next, runp)
		  != 0)
		{
		  runp = __fork_handlers;
		  goto again;
		}
	    }
	  else
	    lastp->next = runp->next;

	  /* We cannot overwrite the ->next element now.  Put the deleted
	     entries in a separate list.  */
	  struct deleted_handler *newp = alloca (sizeof (*newp));
	  newp->handler = runp;
	  newp->next = deleted;
	  deleted = newp;
	}
      else
	lastp = runp;

      runp = runp->next;
    }
  while (runp != NULL);

  /* Release the lock.  */
  lll_unlock (__fork_lock, LLL_PRIVATE);

  /* Walk the list of all entries which have to be deleted.  */
  while (deleted != NULL)
    {
      /* We need to be informed by possible current users.  */
      deleted->handler->need_signal = 1;
      /* Make sure this gets written out first.  */
      atomic_write_barrier ();

      /* Decrement the reference counter.  If it does not reach zero
	 wait for the last user.  */
      atomic_decrement (&deleted->handler->refcntr);
      unsigned int val;
      while ((val = deleted->handler->refcntr) != 0)
	lll_futex_wait (&deleted->handler->refcntr, val, LLL_PRIVATE);

      deleted = deleted->next;
    }
}
Example #16
File: tpp.c  Project: siddhesh/glibc
int
__pthread_tpp_change_priority (int previous_prio, int new_prio)
{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);
  int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
  int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);

  if (tpp == NULL)
    {
      /* See __init_sched_fifo_prio.  We need both the min and max prio,
         so need to check both, and run initialization if either one is
         not initialized.  The memory model's write-read coherence rule
         makes this work.  */
      if (fifo_min_prio == -1 || fifo_max_prio == -1)
	{
	  __init_sched_fifo_prio ();
	  fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio);
	  fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio);
	}

      size_t size = sizeof *tpp;
      size += (fifo_max_prio - fifo_min_prio + 1)
	      * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
	return ENOMEM;
      tpp->priomax = fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }

  assert (new_prio == -1
	  || (new_prio >= fifo_min_prio
	      && new_prio <= fifo_max_prio));
  assert (previous_prio == -1
	  || (previous_prio >= fifo_min_prio
	      && previous_prio <= fifo_max_prio));

  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0)
	return EAGAIN;
      ++tpp->priomap[new_prio - fifo_min_prio];
      if (new_prio > priomax)
	newpriomax = new_prio;
    }

  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - fifo_min_prio] == 0
	  && priomax == previous_prio
	  && previous_prio > new_prio)
	{
	  int i;
	  for (i = previous_prio - 1; i >= fifo_min_prio; --i)
	    if (tpp->priomap[i - fifo_min_prio])
	      break;
	  newpriomax = i;
	}
    }

  if (priomax == newpriomax)
    return 0;

  /* See CREATE THREAD NOTES in nptl/pthread_create.c.  */
  lll_lock (self->lock, LLL_PRIVATE);

  tpp->priomax = newpriomax;

  int result = 0;

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
	result = errno;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
      self->schedpolicy = __sched_getscheduler (self->tid);
      if (self->schedpolicy == -1)
	result = errno;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
	{
	  if (sp.sched_priority < newpriomax)
	    sp.sched_priority = newpriomax;

	  if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
	    result = errno;
	}
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
Example #17
void OSSpinLockLock(volatile OSSpinLock *__lock)
{
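	/* OSSpinLock is implemented here as a thin wrapper over the low-level
	   lock; this port's lll_lock takes only the lock word.  */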
	lll_lock((OSLowLock*)__lock);
}
Example #18
int
pthread_setattr_default_np (const pthread_attr_t *in)
{
  const struct pthread_attr *real_in;
  struct pthread_attr attrs;
  int ret;

  assert (sizeof (*in) >= sizeof (struct pthread_attr));
  real_in = (struct pthread_attr *) in;

  /* Catch invalid values.  */
  int policy = real_in->schedpolicy;
  ret = check_sched_policy_attr (policy);
  if (ret)
    return ret;

  const struct sched_param *param = &real_in->schedparam;
  if (param->sched_priority > 0)
    {
      ret = check_sched_priority_attr (param->sched_priority, policy);
      if (ret)
	return ret;
    }

  ret = check_cpuset_attr (real_in->cpuset, real_in->cpusetsize);
  if (ret)
    return ret;

  /* stacksize == 0 is fine.  It means that we don't change the current
     value.  */
  if (real_in->stacksize != 0)
    {
      ret = check_stacksize_attr (real_in->stacksize);
      if (ret)
	return ret;
    }

  /* Having a default stack address is wrong.  */
  if (real_in->flags & ATTR_FLAG_STACKADDR)
    return EINVAL;

  attrs = *real_in;

  /* Now take the lock because we start writing into
     __default_pthread_attr.  */
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);

  /* Free the cpuset if the input is 0.  Otherwise copy in the cpuset
     contents.  */
  size_t cpusetsize = attrs.cpusetsize;
  if (cpusetsize == 0)
    {
      free (__default_pthread_attr.cpuset);
      __default_pthread_attr.cpuset = NULL;
    }
  else if (cpusetsize == __default_pthread_attr.cpusetsize)
    {
      attrs.cpuset = __default_pthread_attr.cpuset;
      memcpy (attrs.cpuset, real_in->cpuset, cpusetsize);
    }
  else
    {
      /* This may look wrong at first sight, but it isn't.  We're freeing
	 __default_pthread_attr.cpuset and allocating to attrs.cpuset because
	 we'll copy over all of attr to __default_pthread_attr later.  */
      cpu_set_t *newp = realloc (__default_pthread_attr.cpuset,
				 cpusetsize);

      if (newp == NULL)
	{
	  ret = ENOMEM;
	  goto out;
	}

      attrs.cpuset = newp;
      memcpy (attrs.cpuset, real_in->cpuset, cpusetsize);
    }

  /* We don't want to accidentally set the default stacksize to zero.  */
  if (attrs.stacksize == 0)
    attrs.stacksize = __default_pthread_attr.stacksize;
  __default_pthread_attr = attrs;

 out:
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);
  return ret;
}
Example #19
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (stopped)
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  if (ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		  pd, &pd->tid, TLS_VALUE, &pd->tid) == -1)
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      return errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  sizeof (cpu_set_t), attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Example #20
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#ifdef PREPARE_CREATE
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

  int rc = ARCH_CLONE (fct, STACK_VARIABLES_ARGS, clone_flags,
		       pd, &pd->tid, TLS_VALUE, &pd->tid);

  if (__builtin_expect (rc == -1, 0))
    {
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Perhaps a thread wants to change the IDs and is waiting
	 for this stillborn thread.  */
      if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
			    == -2, 0))
	lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

      /* Free the resources.  */
      __deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);

	      /* We do not free the stack here because the canceled thread
		 itself will do this.  */

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}
Example #21
void
__pthread_initialize_minimal_internal (void)
{
#ifndef SHARED
  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures.  */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);

  /* We must prevent gcc from being clever and move any of the
     following code ahead of the __libc_setup_tls call.  This function
     will initialize the thread register which is subsequently
     used.  */
  __asm __volatile ("");
#endif

  /* Minimal initialization of the thread descriptor.  */
  struct pthread *pd = THREAD_SELF;
  __pthread_initialize_pids (pd);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  THREAD_SETMEM (pd, user_stack, true);
  if (LLL_LOCK_INITIALIZER != 0)
    THREAD_SETMEM (pd, lock, LLL_LOCK_INITIALIZER);
#if HP_TIMING_AVAIL
  THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
#endif

  /* Initialize the robust mutex data.  */
  {
#ifdef __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
#ifdef __NR_set_robust_list
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
				    - offsetof (pthread_mutex_t,
						__data.__list.__next));
    INTERNAL_SYSCALL_DECL (err);
    int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
				sizeof (struct robust_list_head));
    if (INTERNAL_SYSCALL_ERROR_P (res, err))
#endif
      set_robust_list_not_avail ();
  }

#ifdef __NR_futex
# ifndef __ASSUME_PRIVATE_FUTEX
  /* Private futexes are always used (at least internally) so that
     doing the test once this early is beneficial.  */
  {
    int word = 0;
    INTERNAL_SYSCALL_DECL (err);
    word = INTERNAL_SYSCALL (futex, err, 3, &word,
			    FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
    if (!INTERNAL_SYSCALL_ERROR_P (word, err))
      THREAD_SETMEM (pd, header.private_futex, FUTEX_PRIVATE_FLAG);
  }

  /* Private futexes have been introduced earlier than the
     FUTEX_CLOCK_REALTIME flag.  We don't have to run the test if we
     know the former are not supported.  This also means we know the
     kernel will return ENOSYS for unknown operations.  */
  if (THREAD_GETMEM (pd, header.private_futex) != 0)
# endif
# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
    {
      int word = 0;
      /* NB: the syscall actually takes six parameters.  The last is the
	 bit mask.  But since we will not actually wait at all the value
	 is irrelevant.  Given that passing six parameters is difficult
	 on some architectures we just pass whatever random value the
	 calling convention calls for to the kernel.  It causes no harm.  */
      INTERNAL_SYSCALL_DECL (err);
      word = INTERNAL_SYSCALL (futex, err, 5, &word,
			       FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME
			       | FUTEX_PRIVATE_FLAG, 1, NULL, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (word, err));
      if (INTERNAL_SYSCALL_ERRNO (word, err) != ENOSYS)
	__set_futex_clock_realtime ();
    }
# endif
#endif

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);

  /* Initialize the list of all running threads with the main thread.  */
  INIT_LIST_HEAD (&__stack_user);
  list_add (&pd->list, &__stack_user);

  /* Before initializing __stack_user, the debugger could not find us and
     had to set __nptl_initial_report_events.  Propagate its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

#if defined SIGCANCEL || defined SIGSETXID
  struct sigaction sa;
  __sigemptyset (&sa.sa_mask);

# ifdef SIGCANCEL
  /* Install the cancellation signal handler.  If for some reason we
     cannot install the handler we do not abort.  Maybe we should, but
     it is only asynchronous cancellation which is affected.  */
  sa.sa_sigaction = sigcancel_handler;
  sa.sa_flags = SA_SIGINFO;
  (void) __libc_sigaction (SIGCANCEL, &sa, NULL);
# endif

# ifdef SIGSETXID
  /* Install the handle to change the threads' uid/gid.  */
  sa.sa_sigaction = sighandler_setxid;
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  (void) __libc_sigaction (SIGSETXID, &sa, NULL);
# endif

  /* The parent process might have left the signals blocked.  Just in
     case, unblock them.  We reuse the signal mask in the sigaction
     structure.  It is already cleared.  */
# ifdef SIGCANCEL
  __sigaddset (&sa.sa_mask, SIGCANCEL);
# endif
# ifdef SIGSETXID
  __sigaddset (&sa.sa_mask, SIGSETXID);
# endif
  {
    INTERNAL_SYSCALL_DECL (err);
    (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &sa.sa_mask,
			     NULL, _NSIG / 8);
  }
#endif

  /* Get the size of the static and alignment requirements for the TLS
     block.  */
  size_t static_tls_align;
  _dl_get_tls_static_info (&__static_tls_size, &static_tls_align);

  /* Make sure the size takes all the alignments into account.  */
  if (STACK_ALIGN > static_tls_align)
    static_tls_align = STACK_ALIGN;
  __static_tls_align_m1 = static_tls_align - 1;

  __static_tls_size = roundup (__static_tls_size, static_tls_align);

  /* Determine the default allowed stack size.  This is the size used
     in case the user does not specify one.  */
  struct rlimit limit;
  if (__getrlimit (RLIMIT_STACK, &limit) != 0
      || limit.rlim_cur == RLIM_INFINITY)
    /* The system limit is not usable.  Use an architecture-specific
       default.  */
    limit.rlim_cur = ARCH_STACK_DEFAULT_SIZE;
  else if (limit.rlim_cur < PTHREAD_STACK_MIN)
    /* The system limit is unusably small.
       Use the minimal size acceptable.  */
    limit.rlim_cur = PTHREAD_STACK_MIN;

  /* Make sure it meets the minimum size that allocate_stack
     (allocatestack.c) will demand, which depends on the page size.  */
  const uintptr_t pagesz = GLRO(dl_pagesize);
  const size_t minstack = pagesz + __static_tls_size + MINIMAL_REST_STACK;
  if (limit.rlim_cur < minstack)
    limit.rlim_cur = minstack;

  /* Round the resource limit up to page size.  */
  limit.rlim_cur = ALIGN_UP (limit.rlim_cur, pagesz);
  lll_lock (__default_pthread_attr_lock, LLL_PRIVATE);
  __default_pthread_attr.stacksize = limit.rlim_cur;
  __default_pthread_attr.guardsize = GLRO (dl_pagesize);
  lll_unlock (__default_pthread_attr_lock, LLL_PRIVATE);

#ifdef SHARED
  /* Transfer the old value from the dynamic linker's internal location.  */
  *__libc_dl_error_tsd () = *(*GL(dl_error_catch_tsd)) ();
  GL(dl_error_catch_tsd) = &__libc_dl_error_tsd;

  /* Make __rtld_lock_{,un}lock_recursive use pthread_mutex_{,un}lock,
     keep the lock count from the ld.so implementation.  */
  GL(dl_rtld_lock_recursive) = (void *) __pthread_mutex_lock;
  GL(dl_rtld_unlock_recursive) = (void *) __pthread_mutex_unlock;
  unsigned int rtld_lock_count = GL(dl_load_lock).mutex.__data.__count;
  GL(dl_load_lock).mutex.__data.__count = 0;
  while (rtld_lock_count-- > 0)
    __pthread_mutex_lock (&GL(dl_load_lock).mutex);

  GL(dl_make_stack_executable_hook) = &__make_stacks_executable;
#endif

  GL(dl_init_static_tls) = &__pthread_init_static_tls;

  GL(dl_wait_lookup_done) = &__wait_lookup_done;

  /* Register the fork generation counter with the libc.  */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
  __libc_multiple_threads_ptr =
#endif
    __libc_pthread_init (&__fork_generation, __reclaim_stacks,
			 ptr_pthread_functions);

  /* Determine whether the machine is SMP or not.  */
  __is_smp = is_smp_system ();
}
Example #22
static int
create_thread (struct pthread *pd, const struct pthread_attr *attr,
	       bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran)
{
  /* Determine whether the newly created thread has to be started
     stopped since we have to set the scheduling parameters or set the
     affinity.  */
  if (attr != NULL
      && (__glibc_unlikely (attr->cpuset != NULL)
	  || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)))
    stopped_start = true;

  pd->stopped_start = stopped_start;
  if (__glibc_unlikely (stopped_start))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* We rely heavily on various flags the CLONE function understands:

     CLONE_VM, CLONE_FS, CLONE_FILES
	These flags select semantics with shared address space and
	file descriptors according to what POSIX requires.

     CLONE_SIGHAND, CLONE_THREAD
	This flag selects the POSIX signal semantics and various
	other kinds of sharing (itimers, POSIX timers, etc.).

     CLONE_SETTLS
	The sixth parameter to CLONE determines the TLS area for the
	new thread.

     CLONE_PARENT_SETTID
	The kernel writes the thread ID of the newly created thread
	into the location pointed to by the fifth parameter to CLONE.

	Note that it would be semantically equivalent to use
	CLONE_CHILD_SETTID but it would be more expensive in the kernel.

     CLONE_CHILD_CLEARTID
	The kernel clears the thread ID of a thread that has called
	sys_exit() in the location pointed to by the seventh parameter
	to CLONE.

     The termination signal is chosen to be zero which means no signal
     is sent.  */
  const int clone_flags = (CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM
			   | CLONE_SIGHAND | CLONE_THREAD
			   | CLONE_SETTLS | CLONE_PARENT_SETTID
			   | CLONE_CHILD_CLEARTID
			   | 0);

  TLS_DEFINE_INIT_TP (tp, pd);

  if (__glibc_unlikely (ARCH_CLONE (&start_thread, STACK_VARIABLES_ARGS,
				    clone_flags, pd, &pd->tid, tp, &pd->tid)
			== -1))
    return errno;

  /* It's started now, so if we fail below, we'll have to cancel it
     and let it clean itself up.  */
  *thread_ran = true;

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (attr != NULL)
    {
      INTERNAL_SYSCALL_DECL (err);
      int res;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  assert (stopped_start);

	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	  err_out:
	    {
	      /* The operation failed.  We have to kill the thread.
		 We let the normal cancellation mechanism do the work.  */

	      INTERNAL_SYSCALL_DECL (err2);
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);

	      return INTERNAL_SYSCALL_ERRNO (res, err);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  assert (stopped_start);

	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (res, err)))
	    goto err_out;
	}
    }

  return 0;
}
Example #23
/* Acquire read lock for RWLOCK.  */
int
attribute_protected
__pthread_rwlock_rdlock (
     pthread_rwlock_t *rwlock)
{
  int result = 0;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  while (1)
    {
      /* Get the rwlock if there is no writer...  */
      if (rwlock->__data.__writer == 0
	  /* ...and if either no writer is waiting or we prefer readers.  */
	  && (!rwlock->__data.__nr_writers_queued
	      || PTHREAD_RWLOCK_PREFER_READER_P (rwlock)))
	{
	  /* Increment the reader counter.  Avoid overflow.  */
	  if (__builtin_expect (++rwlock->__data.__nr_readers == 0, 0))
	    {
	      /* Overflow on number of readers.	 */
	      --rwlock->__data.__nr_readers;
	      result = EAGAIN;
	    }

	  break;
	}

      /* Make sure we are not holding the rwlock as a writer.  This is
	 a deadlock situation we recognize and report.  */
      if (__builtin_expect (rwlock->__data.__writer
			    == THREAD_GETMEM (THREAD_SELF, tid), 0))
	{
	  result = EDEADLK;
	  break;
	}

      /* Remember that we are a reader.  */
      if (__builtin_expect (++rwlock->__data.__nr_readers_queued == 0, 0))
	{
	  /* Overflow on number of queued readers.  */
	  --rwlock->__data.__nr_readers_queued;
	  result = EAGAIN;
	  break;
	}

      int waitval = rwlock->__data.__readers_wakeup;

      /* Free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* Wait for the writer to finish.  */
      lll_futex_wait (&rwlock->__data.__readers_wakeup, waitval,
		      rwlock->__data.__shared);

      /* Get the lock.  */
      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

      --rwlock->__data.__nr_readers_queued;
    }

  /* We are done, free the lock.  */
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  return result;
}
Example #24
int
__pthread_tpp_change_priority (int previous_prio, int new_prio)
{
  struct pthread *self = THREAD_SELF;
  struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp);

  if (tpp == NULL)
    {
      if (__sched_fifo_min_prio == -1)
	__init_sched_fifo_prio ();

      size_t size = sizeof *tpp;
      size += (__sched_fifo_max_prio - __sched_fifo_min_prio + 1)
	      * sizeof (tpp->priomap[0]);
      tpp = calloc (size, 1);
      if (tpp == NULL)
	return ENOMEM;
      tpp->priomax = __sched_fifo_min_prio - 1;
      THREAD_SETMEM (self, tpp, tpp);
    }

  assert (new_prio == -1
	  || (new_prio >= __sched_fifo_min_prio
	      && new_prio <= __sched_fifo_max_prio));
  assert (previous_prio == -1
	  || (previous_prio >= __sched_fifo_min_prio
	      && previous_prio <= __sched_fifo_max_prio));

  int priomax = tpp->priomax;
  int newpriomax = priomax;
  if (new_prio != -1)
    {
      if (tpp->priomap[new_prio - __sched_fifo_min_prio] + 1 == 0)
	return EAGAIN;
      ++tpp->priomap[new_prio - __sched_fifo_min_prio];
      if (new_prio > priomax)
	newpriomax = new_prio;
    }

  if (previous_prio != -1)
    {
      if (--tpp->priomap[previous_prio - __sched_fifo_min_prio] == 0
	  && priomax == previous_prio
	  && previous_prio > new_prio)
	{
	  int i;
	  for (i = previous_prio - 1; i >= __sched_fifo_min_prio; --i)
	    if (tpp->priomap[i - __sched_fifo_min_prio])
	      break;
	  newpriomax = i;
	}
    }

  if (priomax == newpriomax)
    return 0;

  lll_lock (self->lock, LLL_PRIVATE);

  tpp->priomax = newpriomax;

  int result = 0;
#ifdef TPP_PTHREAD_SCHED
  int policy;
  struct sched_param param;
#endif

  if ((self->flags & ATTR_FLAG_SCHED_SET) == 0)
    {
#ifndef TPP_PTHREAD_SCHED
      if (__sched_getparam (self->tid, &self->schedparam) != 0)
#else
      if (__pthread_getschedparam (self->tid, &policy, &self->schedparam) != 0)
#endif
	result = errno;
      else
	self->flags |= ATTR_FLAG_SCHED_SET;
    }

  if ((self->flags & ATTR_FLAG_POLICY_SET) == 0)
    {
#ifndef TPP_PTHREAD_SCHED
      self->schedpolicy = __sched_getscheduler (self->tid);
#else
      if (__pthread_getschedparam (self->tid, &self->schedpolicy, &param) != 0)
        self->schedpolicy = -1;
#endif
      if (self->schedpolicy == -1)
	result = errno;
      else
	self->flags |= ATTR_FLAG_POLICY_SET;
    }

  if (result == 0)
    {
      struct sched_param sp = self->schedparam;
      if (sp.sched_priority < newpriomax || sp.sched_priority < priomax)
	{
	  if (sp.sched_priority < newpriomax)
	    sp.sched_priority = newpriomax;

#ifndef TPP_PTHREAD_SCHED
	  if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0)
#else
      if (__pthread_setschedparam (self->tid, self->schedpolicy, &sp) < 0)
#endif
	    result = errno;
	}
    }

  lll_unlock (self->lock, LLL_PRIVATE);

  return result;
}
Example #25
/* Try to acquire write lock for RWLOCK or return after the specified time.  */
int
pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
			    const struct timespec *abstime)
{
  int result = 0;
  bool wake_readers = false;
  int futex_shared =
      rwlock->__data.__shared == LLL_PRIVATE ? FUTEX_PRIVATE : FUTEX_SHARED;

  /* Make sure we are alone.  */
  lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

  while (1)
    {
      int err;

      /* Get the rwlock if there is no writer and no reader.  */
      if (rwlock->__data.__writer == 0 && rwlock->__data.__nr_readers == 0)
	{
	  /* Mark self as writer.  */
	  rwlock->__data.__writer = THREAD_GETMEM (THREAD_SELF, tid);
	  break;
	}

      /* Make sure we are not holding the rwlock as a writer.  This is
	 a deadlock situation we recognize and report.  */
      if (__builtin_expect (rwlock->__data.__writer
			    == THREAD_GETMEM (THREAD_SELF, tid), 0))
	{
	  result = EDEADLK;
	  break;
	}

      /* Make sure the passed in timeout value is valid.  Ideally this
	 test would be executed once.  But since it must not be
	 performed if we would not block at all, simply moving the test
	 to the front is not an option.  Replicating all the code is
	 costly while this test is not.  */
      if (__builtin_expect (abstime->tv_nsec >= 1000000000
                            || abstime->tv_nsec < 0, 0))
	{
	  result = EINVAL;
	  break;
	}

      /* Remember that we are a writer.  */
      if (++rwlock->__data.__nr_writers_queued == 0)
	{
	  /* Overflow on number of queued writers.  */
	  --rwlock->__data.__nr_writers_queued;
	  result = EAGAIN;
	  break;
	}

      int waitval = rwlock->__data.__writer_wakeup;

      /* Free the lock.  */
      lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* Wait for the writer or reader(s) to finish.  We handle ETIMEDOUT
	 below; on other return values, we decide how to continue based on
	 the state of the rwlock.  */
      err = futex_abstimed_wait (&rwlock->__data.__writer_wakeup, waitval,
				 abstime, futex_shared);

      /* Get the lock.  */
      lll_lock (rwlock->__data.__lock, rwlock->__data.__shared);

      /* To start over again, remove the thread from the writer list.  */
      --rwlock->__data.__nr_writers_queued;

      /* Did the futex call time out?  */
      if (err == ETIMEDOUT)
	{
	  result = ETIMEDOUT;
	  /* If we prefer writers, it can have happened that readers blocked
	     for us to acquire the lock first.  If we have timed out, we need
	     to wake such readers if there are any, and if there is no writer
	     currently (otherwise, the writer will take care of wake-up).
	     Likewise, even if we prefer readers, we can be responsible for
	     wake-up (see pthread_rwlock_unlock) if no reader or writer has
	     acquired the lock.  We have timed out and thus not consumed a
	     futex wake-up; therefore, if there is no other blocked writer
	     that would consume the wake-up and thus take over responsibility,
	     we need to wake blocked readers.  */
	  if ((!PTHREAD_RWLOCK_PREFER_READER_P (rwlock)
	       || ((rwlock->__data.__nr_readers == 0)
		   && (rwlock->__data.__nr_writers_queued == 0)))
	      && (rwlock->__data.__nr_readers_queued > 0)
	      && (rwlock->__data.__writer == 0))
	    {
	      ++rwlock->__data.__readers_wakeup;
	      wake_readers = true;
	    }
	  break;
	}
    }

  /* We are done, free the lock.  */
  lll_unlock (rwlock->__data.__lock, rwlock->__data.__shared);

  /* Might be required after timeouts.  */
  if (wake_readers)
    futex_wake (&rwlock->__data.__readers_wakeup, INT_MAX, futex_shared);

  return result;
}
Example #26
int
pthread_getattr_np (
     pthread_t thread_id,
     pthread_attr_t *attr)
{
  struct pthread *thread = (struct pthread *) thread_id;
  struct pthread_attr *iattr = (struct pthread_attr *) attr;
  int ret = 0;

  lll_lock (thread->lock, LLL_PRIVATE);

  /* The thread library is responsible for keeping the values in the
     thread descriptor up-to-date in case the user changes them.  */
  memcpy (&iattr->schedparam, &thread->schedparam,
	  sizeof (struct sched_param));
  iattr->schedpolicy = thread->schedpolicy;

  /* Start with the flags from the thread descriptor.  */
  iattr->flags = thread->flags;

  /* The thread might be detached by now.  */
  if (IS_DETACHED (thread))
    iattr->flags |= ATTR_FLAG_DETACHSTATE;

  /* This is the guardsize after adjusting it.  */
  iattr->guardsize = thread->reported_guardsize;

  /* The sizes are subject to alignment.  */
  if (__builtin_expect (thread->stackblock != NULL, 1))
    {
      iattr->stacksize = thread->stackblock_size;
      iattr->stackaddr = (char *) thread->stackblock + iattr->stacksize;
    }
  else
    {
      /* No stack information available.  This must be for the initial
	 thread.  Get the info in some magical way.  */
      assert (abs (thread->pid) == thread->tid);

      /* Stack size limit.  */
      struct rlimit rl;

      /* The safest way to get the top of the stack is to read
	 /proc/self/maps and locate the line into which
	 __libc_stack_end falls.  */
      FILE *fp = fopen ("/proc/self/maps", "rc");
      if (fp == NULL)
	ret = errno;
      /* We need the limit of the stack in any case.  */
      else
	{
	  if (getrlimit (RLIMIT_STACK, &rl) != 0)
	    ret = errno;
	  else
	    {
	      /* We need no locking.  */
	      __fsetlocking (fp, FSETLOCKING_BYCALLER);

	      /* Until we find an entry (which should always be the case),
		 mark the result as a failure.  */
	      ret = ENOENT;

	      char *line = NULL;
	      size_t linelen = 0;
	      uintptr_t last_to = 0;

	      while (! feof_unlocked (fp))
		{
		  if (getdelim (&line, &linelen, '\n', fp) <= 0)
		    break;

		  uintptr_t from;
		  uintptr_t to;
		  if (sscanf (line, "%" SCNxPTR "-%" SCNxPTR, &from, &to) != 2)
		    continue;
		  if (from <= (uintptr_t) __libc_stack_end
		      && (uintptr_t) __libc_stack_end < to)
		    {
		      /* Found the entry.  Now we have the info we need.  */
		      iattr->stacksize = rl.rlim_cur;
		      iattr->stackaddr = (void *) to;

		      /* The limit might be too high.  */
		      if ((size_t) iattr->stacksize
			  > (size_t) iattr->stackaddr - last_to)
			iattr->stacksize = (size_t) iattr->stackaddr - last_to;

		      /* We succeeded; no need to look further.  */
		      ret = 0;
		      break;
		    }
		  last_to = to;
		}

	      free (line);
	    }

	  fclose (fp);
	}
    }

  iattr->flags |= ATTR_FLAG_STACKADDR;

  if (ret == 0)
    {
      size_t size = 16;
      cpu_set_t *cpuset = NULL;

      do
	{
	  size <<= 1;

	  void *newp = realloc (cpuset, size);
	  if (newp == NULL)
	    {
	      ret = ENOMEM;
	      break;
	    }
	  cpuset = (cpu_set_t *) newp;

	  ret = __pthread_getaffinity_np (thread_id, size, cpuset);
	}
      /* Pick some ridiculous upper limit.  Is 8 million CPUs enough?  */
      while (ret == EINVAL && size < 1024 * 1024);

      if (ret == 0)
	{
	  iattr->cpuset = cpuset;
	  iattr->cpusetsize = size;
	}
      else
	{
	  free (cpuset);
	  if (ret == ENOSYS)
	    {
	      /* There is no such functionality.  */
	      ret = 0;
	      iattr->cpuset = NULL;
	      iattr->cpusetsize = 0;
	    }
	}
    }

  lll_unlock (thread->lock, LLL_PRIVATE);

  return ret;
}
Example #27
static int
do_clone (struct pthread *pd, const struct pthread_attr *attr,
	  int clone_flags, int (*fct) (void *), STACK_VARIABLES_PARMS,
	  int stopped)
{
#if 0
  PREPARE_CREATE;
#endif

  if (__builtin_expect (stopped != 0, 0))
    /* We make sure the thread does not run far by forcing it to get a
       lock.  We lock it here too so that the new thread cannot continue
       until we tell it to.  */
    lll_lock (pd->lock, LLL_PRIVATE);

  /* One more thread.  We cannot have the thread do this itself, since it
     might exist but not have been scheduled yet by the time we've returned
     and need to check the value to behave correctly.  We must do it before
     creating the thread, in case it does get scheduled first and then
     might mistakenly think it was the only thread.  In the failure case,
     we momentarily store a false value; this doesn't matter because there
     is no kosher thing a signal handler interrupting us right here can do
     that cares whether the thread count is correct.  */
  atomic_increment (&__nptl_nthreads);

#if !defined(__native_client__) && !defined(__ZRT_HOST)
#error "This code was changed to work only in Native Client"
#endif

  /* Native Client does not have a notion of a thread ID, so we make
     one up.  This must be small enough to leave space for the number identifying
     the clock.  Use CLOCK_IDFIELD_SIZE to guarantee that.  */
  pd->tid = ((unsigned int) pd) >> CLOCK_IDFIELD_SIZE;

  /* Unlike the kernel, the Native Client syscall thread_create does not push
     a return address onto the stack.  We emulate this behavior on x86-64 to meet the
     ABI requirement ((%rsp + 8) mod 16 == 0).  On x86-32 the attribute
     'force_align_arg_pointer' does the same for start_thread ().  */
#ifdef __x86_64__
  STACK_VARIABLES_ARGS -= 8;
#endif

  if (__nacl_irt_thread_create (fct, STACK_VARIABLES_ARGS, pd) != 0)
    {
      pd->tid = 0;
      atomic_decrement (&__nptl_nthreads); /* Oops, we lied for a second.  */

      /* Failed.  If the thread is detached, remove the TCB here since
	 the caller cannot do this.  The caller remembered the thread
	 as detached and cannot reverify that it is not since it must
	 not access the thread descriptor again.  */
      if (IS_DETACHED (pd))
	__deallocate_stack (pd);

      /* We have to translate error codes.  */
      return errno == ENOMEM ? EAGAIN : errno;
    }

  /* Now we have the possibility to set scheduling parameters etc.  */
  if (__builtin_expect (stopped != 0, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      int res = 0;

      /* Set the affinity mask if necessary.  */
      if (attr->cpuset != NULL)
	{
	  res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid,
				  attr->cpusetsize, attr->cpuset);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    {
	      /* The operation failed.  We have to kill the thread.  First
		 send it the cancellation signal.  */
	      INTERNAL_SYSCALL_DECL (err2);
	    err_out:
#if __ASSUME_TGKILL
	      (void) INTERNAL_SYSCALL (tgkill, err2, 3,
				       THREAD_GETMEM (THREAD_SELF, pid),
				       pd->tid, SIGCANCEL);
#else
	      (void) INTERNAL_SYSCALL (tkill, err2, 2, pd->tid, SIGCANCEL);
#endif

	      return (INTERNAL_SYSCALL_ERROR_P (res, err)
		      ? INTERNAL_SYSCALL_ERRNO (res, err)
		      : 0);
	    }
	}

      /* Set the scheduling parameters.  */
      if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0)
	{
	  res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid,
				  pd->schedpolicy, &pd->schedparam);

	  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (res, err), 0))
	    goto err_out;
	}
    }

  /* We now have for sure more than one thread.  The main thread might
     not yet have the flag set.  No need to set the global variable
     again if this is what we use.  */
  THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);

  return 0;
}