Example #1
int
__pthread_mutex_trylock (pthread_mutex_t *mtxp)
{
  struct __pthread *self;
  int ret;

  switch (MTX_TYPE (mtxp))
    {
    case PT_MTX_NORMAL:
      ret = lll_trylock (&mtxp->__lock);
      if (ret)
	ret = EBUSY;
      break;

    case PT_MTX_RECURSIVE:
      self = _pthread_self ();
      if (mtx_owned_p (mtxp, self, mtxp->__flags))
	{
	  if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
	    return EAGAIN;

	  ++mtxp->__cnt;
	  ret = 0;
	}
      else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
	{
	  mtx_set_owner (mtxp, self, mtxp->__flags);
	  mtxp->__cnt = 1;
	}
      else
	ret = EBUSY;

      break;

    case PT_MTX_ERRORCHECK:
      self = _pthread_self ();
      if ((ret = lll_trylock (&mtxp->__lock)) == 0)
	mtx_set_owner (mtxp, self, mtxp->__flags);
      else
	ret = EBUSY;
      break;

    case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST:
    case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
    case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
      self = _pthread_self ();
      ROBUST_LOCK (self, mtxp, __lll_robust_trylock);
      break;

    default:
      ret = EINVAL;
      break;
    }

  return ret;
}
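The trylock path above never blocks: for a plain mutex it returns EBUSY when the lock word is already taken, and for a recursive mutex it just bumps __cnt when the caller already owns it. Below is a minimal caller-side sketch of that behaviour through the public POSIX interface; the helper name try_twice and the message are illustrative only, not part of the example above.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

int
try_twice (void)
{
  pthread_mutexattr_t attr;
  int ret;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  /* The first acquisition succeeds; the second succeeds too because the
     mutex is recursive, so the owner's count is simply incremented.  */
  ret = pthread_mutex_trylock (&m);
  if (ret == 0)
    ret = pthread_mutex_trylock (&m);
  if (ret == EBUSY)
    fprintf (stderr, "mutex already held by another thread\n");

  pthread_mutex_unlock (&m);
  pthread_mutex_unlock (&m);
  return ret;
}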
Example #2
void 
_funlockfile(FILE *fp)
{
	pthread_t	curthread = _pthread_self();

	/*
	 * Check if this file is owned by the current thread:
	 */
	if (fp->_lock->fl_owner == curthread) {
		/*
		 * Check if this thread has locked the FILE
		 * more than once:
		 */
		if (fp->_lock->fl_count > 1)
			/*
			 * Decrement the count of the number of
			 * times the running thread has locked this
			 * file:
			 */
			fp->_lock->fl_count--;
		else {
			/*
			 * The running thread will release the
			 * lock now:
			 */
			fp->_lock->fl_count = 0;
#ifndef __SYMBIAN32__			
			fp->_lock->fl_owner = NULL;
#else //__SYMBIAN32__
			fp->_lock->fl_owner = 0;
#endif //__SYMBIAN32__
			_pthread_mutex_unlock(&fp->_lock->fl_mutex);
		}
	}
}
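_funlockfile only releases the underlying mutex once the per-thread fl_count drops back to zero, which is what makes nested flockfile/funlockfile pairs safe. A hedged sketch of the usual caller pattern, using only the standard flockfile/funlockfile interfaces (write_record is a placeholder name):

#include <stdio.h>

/* Write one record atomically with respect to other threads using the
 * same stream.  Nested locking is fine: the count, not the mutex,
 * tracks recursion.
 */
void
write_record(FILE *out, const char *tag, int value)
{
	flockfile(out);
	fputs(tag, out);
	fprintf(out, "=%d\n", value);
	funlockfile(out);
}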
Example #3
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	struct pthread *self = _pthread_self();
	int count, oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else {
		do {
			count = SPIN_COUNT;
			while (lck->s_lock) {
#ifdef __i386__
				/* tell cpu we are spinning */
				__asm __volatile("pause");
#endif
				if (--count <= 0) {
					count = SPIN_COUNT;
					_pthread_yield();
				}
			}
			atomic_swap_int(&(lck)->s_lock, 1, &oldval);
		} while (oldval);

		lck->s_owner = self;
		ret = 0;
	}

	return (ret);
}
Example #4
/* Return the thread ID of the calling thread.  */
pthread_t
__pthread_self (void)
{
  struct __pthread *self = _pthread_self ();
  assert (self);

  return self->thread;
}
Example #5
pthread_t
pthread_self()
{
    pthread_t r;
    _PTHREAD_LOCK();
    r = _pthread_self();
    _PTHREAD_UNLOCK();
    return r;
}
Example #6
/*
 * Provide the equivalent of the Solaris thr_main() function
 */
int
_pthread_main_np()
{

	if (!_thr_initial)
		return (-1);
	else
		return (_pthread_equal(_pthread_self(), _thr_initial) ? 1 : 0);
}
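pthread_main_np() is a BSD extension (the _np suffix marks it non-portable): 1 means the caller is the initial thread, 0 means it is not, and -1 means the threading library has not been initialized yet. A hedged usage sketch, assuming the <pthread_np.h> header used by the BSD libcs:

#include <pthread.h>
#include <pthread_np.h>	/* BSD extension header; exact name varies by platform */

int
must_run_on_main_thread(void)
{
	/* Refuse to proceed unless we are the initial thread. */
	return (pthread_main_np() == 1);
}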
Example #7
int
pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mtx)
{
    int err = -1;
    _PTHREAD_LOCK();
    if (cond->pc_mtx) {
        /* Another wait is already associated with this condition. */
        errno = EINVAL;
        _PTHREAD_UNLOCK();
        return -1;
    }
    if (mtx->pm_owner != _pthread_self()) {
        /* The caller must own the mutex it passes in. */
        errno = EPERM;
        _PTHREAD_UNLOCK();
        return -1;
    }
    cond->pc_mtx = mtx;
    mtx->pm_owner = 0;
    PTHREAD_LOG("waiting on condition at %p", cond);
    _PTHREAD_UNLOCK();              //
    err = thr_mtx_wait(mtx->pm_id); //  dangerous window: the lock is released while we block
    _PTHREAD_LOCK();                //
    cond->pc_mtx = 0;
    if (!err) {
        mtx->pm_owner = _pthread_self();
    }
    _PTHREAD_UNLOCK();
    return err;
}
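As with any pthread_cond_wait, the caller must re-check its predicate in a loop after waking up, since a wakeup does not guarantee the condition still holds. The classic pattern looks like this (a sketch; ready, lock, and cond are placeholder names):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

void
wait_until_ready(void)
{
    pthread_mutex_lock(&lock);
    while (!ready)                      /* re-check after every wakeup */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

void
mark_ready(void)
{
    pthread_mutex_lock(&lock);
    ready = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}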
Example #8
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	struct pthread *self = _pthread_self();
	int oldval, ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else if (lck->s_owner == self)
		ret = EDEADLK;
	else if (lck->s_lock != 0)
		ret = EBUSY;
	else {
		atomic_swap_int(&(lck)->s_lock, 1, &oldval);
		if (oldval)
			ret = EBUSY;
		else {
			lck->s_owner = self;
			ret = 0;
		}
	}
	return (ret);
}
Example #9
void
__pthread_thread_dealloc (struct __pthread *thread)
{
  assert (thread != _pthread_self ());

  __pthread_thread_halt (thread);

  /* Clean up the activation state.  */
  hurd_activation_state_free (thread->utcb);

  assert (thread->lock_message_buffer);
  hurd_message_buffer_free (thread->lock_message_buffer);

  thread->have_kernel_resources = 0;
}
Example #10
int
__pthread_mutex_transfer_np (pthread_mutex_t *mtxp, pthread_t th)
{
  struct __pthread *self = _pthread_self ();
  struct __pthread *pt = __pthread_getid (th);

  if (pt == NULL)
    return ESRCH;
  else if (pt == self)
    return 0;

  int ret = 0;
  int flags = mtxp->__flags & GSYNC_SHARED;

  switch (MTX_TYPE (mtxp))
    {
    case PT_MTX_NORMAL:
      break;

    case PT_MTX_RECURSIVE:
    case PT_MTX_ERRORCHECK:
      if (!mtx_owned_p (mtxp, self, flags))
	ret = EPERM;
      else
	mtx_set_owner (mtxp, pt, flags);

      break;

    case PT_MTX_NORMAL | PTHREAD_MUTEX_ROBUST:
    case PT_MTX_RECURSIVE | PTHREAD_MUTEX_ROBUST:
    case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
      /* Note that this can be used to transfer an inconsistent
       * mutex as well. The new owner will still have the same
       * flags as the original. */
      if (mtxp->__owner_id != self->thread
	  || (int) (mtxp->__lock & LLL_OWNER_MASK) != __getpid ())
	ret = EPERM;
      else
	mtxp->__owner_id = pt->thread;

      break;

    default:
      ret = EINVAL;
    }

  return ret;
}
Example #11
int
_raise(int sig)
{
	int ret;

	if (!_kse_isthreaded())
		ret = kill(getpid(), sig);
	else {
		ret = _pthread_kill(_pthread_self(), sig);
		if (ret != 0) {
			errno = ret;
			ret = -1;
		}
	}
	return (ret);
}
Example #12
void
_flockfile(FILE *fp)
{
	pthread_t curthread = _pthread_self();

	if (fp->_lock->fl_owner == curthread)
		fp->_lock->fl_count++;
	else {
		/*
		 * Make sure this mutex is treated as a private
		 * internal mutex:
		 */
		_pthread_mutex_lock(&fp->_lock->fl_mutex);
		fp->_lock->fl_owner = curthread;
		fp->_lock->fl_count = 1;
	}
}
Example #13
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	struct pthread_spinlock	*lck;
	int ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else {
		if (lck->s_owner != _pthread_self())
			ret = EPERM;
		else {
			lck->s_owner = NULL;
			atomic_swap_int(&lck->s_lock, 0, &ret);
			ret = 0;
		}
	}

	return (ret);
}
Example #14
int
_ftrylockfile(FILE *fp)
{
	pthread_t curthread = _pthread_self();
	int	ret = 0;

	if (fp->_lock->fl_owner == curthread)
		fp->_lock->fl_count++;
	/*
	 * Make sure this mutex is treated as a private
	 * internal mutex:
	 */
	else if (_pthread_mutex_trylock(&fp->_lock->fl_mutex) == 0) {
		fp->_lock->fl_owner = curthread;
		fp->_lock->fl_count = 1;
	}
	else
		ret = -1;
	return (ret);
}
Example #15
int
__pthread_setcancelstate (int state, int *oldstate)
{
    struct __pthread *p = _pthread_self ();

    switch (state)
    {
    default:
        return EINVAL;
    case PTHREAD_CANCEL_ENABLE:
    case PTHREAD_CANCEL_DISABLE:
        break;
    }

    __pthread_mutex_lock (&p->cancel_lock);
    if (oldstate)
        *oldstate = p->cancel_state;
    p->cancel_state = state;
    __pthread_mutex_unlock (&p->cancel_lock);

    return 0;
}
Example #16
int
__pthread_setcanceltype (int type, int *oldtype)
{
  struct __pthread *p = _pthread_self ();

  switch (type)
    {
    default:
      return EINVAL;
    case PTHREAD_CANCEL_DEFERRED:
    case PTHREAD_CANCEL_ASYNCHRONOUS:
      break;
    }

  __pthread_mutex_lock (&p->cancel_lock);
  if (oldtype)
    *oldtype = p->cancel_type;
  p->cancel_type = type;
  __pthread_mutex_unlock (&p->cancel_lock);

  return 0;
}
Example #17
int
pthread_setspecific (pthread_key_t key, const void *value)
{
  error_t err;
  struct __pthread *self = _pthread_self ();

  if (key < 0 || key >= __pthread_key_count
      || __pthread_key_destructors[key] == PTHREAD_KEY_INVALID)
    return EINVAL;

  if (! self->thread_specifics)
    {
      err = hurd_ihash_create (&self->thread_specifics, HURD_IHASH_NO_LOCP);
      if (err)
	return ENOMEM;
    }

  err = hurd_ihash_add (self->thread_specifics, key, (void *) value);
  if (err)
    return ENOMEM;

  return 0;
}
Example #18
/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id = __pthread_default_condattr.__clock;

  if (abstime && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    return EINVAL;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (!cancelled)
    {
      self->cancel_hook = cancel_hook;
      self->cancel_hook_arg = &ctx;
      oldtype = self->cancel_type;

      if (oldtype != PTHREAD_CANCEL_DEFERRED)
	self->cancel_type = PTHREAD_CANCEL_DEFERRED;

      /* Add ourselves to the list of waiters.  This is done while setting
         the cancellation hook to simplify the cancellation procedure, i.e.
         if the thread is queued, it can be cancelled, otherwise it is
         already unblocked, progressing on the return path.  */
      __pthread_spin_lock (&cond->__lock);
      __pthread_enqueue (&cond->__queue, self);
      if (cond->__attr != NULL)
	clock_id = cond->__attr->__clock;
      __pthread_spin_unlock (&cond->__lock);
    }
  __pthread_mutex_unlock (&self->cancel_lock);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
         wakeup message has been sent.  It was either consumed while we were
         blocking, or queued after we timed out and before we acquired the
         condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  No one attempted to wake us up,
         i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}
Example #19
/* Acquire the rwlock *RWLOCK for reading blocking until *ABSTIME if
   it is already held.  As a GNU extension, if TIMESPEC is NULL then
   wait forever.  */
int
__pthread_rwlock_timedrdlock_internal (struct __pthread_rwlock *rwlock,
				       const struct timespec *abstime)
{
  error_t err;
  int drain;
  struct __pthread *self;

  __pthread_spin_lock (&rwlock->__lock);
  if (__pthread_spin_trylock (&rwlock->__held) == 0)
    /* Successfully acquired the lock.  */
    {
      assert (rwlock->__readerqueue == 0);
      assert (rwlock->__writerqueue == 0);
      assert (rwlock->__readers == 0);

      rwlock->__readers = 1;
      __pthread_spin_unlock (&rwlock->__lock);
      return 0;
    }
  else
    /* The lock is held, but is it held by a reader?  */
  if (rwlock->__readers > 0)
    /* If so, just add ourselves to the number of readers.  */
    {
      assert (rwlock->__readerqueue == 0);
      rwlock->__readers++;
      __pthread_spin_unlock (&rwlock->__lock);
      return 0;
    }

  /* The lock is busy.  */

  /* Better be blocked by a writer.  */
  assert (rwlock->__readers == 0);

  if (abstime != NULL && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    return EINVAL;

  self = _pthread_self ();

  /* Add ourself to the queue.  */
  __pthread_enqueue (&rwlock->__readerqueue, self);
  __pthread_spin_unlock (&rwlock->__lock);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, CLOCK_REALTIME);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&rwlock->__lock);
  if (self->prevp == NULL)
    /* Another thread removed us from the queue, which means a wakeup message
       has been sent.  It was either consumed while we were blocking, or
       queued after we timed out and before we acquired the rwlock lock, in
       which case the message queue must be drained.  */
    drain = err ? 1 : 0;
  else
    {
      /* We're still in the queue.  No one attempted to wake us up, i.e. we
         timed out.  */
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&rwlock->__lock);

  if (drain)
    __pthread_block (self);

  if (err)
    {
      assert (err == ETIMEDOUT);
      return err;
    }

  /* The reader count has already been incremented by whoever woke us
     up.  */

  assert (rwlock->__readers > 0);

  return 0;
}
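A timed read lock is requested with an absolute CLOCK_REALTIME deadline, just like the condition wait above; the wait fails only with ETIMEDOUT (or EINVAL for a malformed timespec). A hedged caller sketch, with read_lock_with_timeout as a placeholder name:

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

/* Read-lock RW, giving up after roughly MS milliseconds.  */
int
read_lock_with_timeout (long ms)
{
  struct timespec abstime;

  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += ms / 1000;
  abstime.tv_nsec += (ms % 1000) * 1000000L;
  if (abstime.tv_nsec >= 1000000000L)
    {
      abstime.tv_sec++;
      abstime.tv_nsec -= 1000000000L;
    }

  return pthread_rwlock_timedrdlock (&rw, &abstime);	/* 0 or ETIMEDOUT */
}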
Example #20
/* Lock MUTEX, return EBUSY if we can't get it.  */
int
__pthread_mutex_trylock (struct __pthread_mutex *mutex)
{
  int err;
  struct __pthread *self;

  __pthread_spin_lock (&mutex->__lock);
  if (__pthread_spin_trylock (&mutex->__held) == 0)
    /* Acquired the lock.  */
    {
#ifndef NDEBUG
      self = _pthread_self ();
      if (self)
	/* The main thread may take a lock before the library is fully
	   initialized, in particular, before the main thread has a
	   TCB.  */
	{
	  assert (! mutex->owner);
	  mutex->owner = _pthread_self ();
	}
#endif

      if (mutex->attr)
	switch (mutex->attr->mutex_type)
	  {
	  case PTHREAD_MUTEX_NORMAL:
	    break;

	  case PTHREAD_MUTEX_RECURSIVE:
	    mutex->locks = 1;
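	    /* FALLTHROUGH: a recursive mutex also records its owner below.  */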
	  case PTHREAD_MUTEX_ERRORCHECK:
	    mutex->owner = _pthread_self ();
	    break;

	  default:
	    LOSE;
	  }

      __pthread_spin_unlock (&mutex->__lock);
      return 0;
    }

  err = EBUSY;

  if (mutex->attr)
    {
      self = _pthread_self ();
      switch (mutex->attr->mutex_type)
	{
	case PTHREAD_MUTEX_NORMAL:
	  break;

	case PTHREAD_MUTEX_ERRORCHECK:
	  /* We could check if MUTEX->OWNER is SELF, however, POSIX
	     does not permit pthread_mutex_trylock to return EDEADLK
	     instead of EBUSY, only pthread_mutex_lock.  */
	  break;

	case PTHREAD_MUTEX_RECURSIVE:
	  if (mutex->owner == self)
	    {
	      mutex->locks ++;
	      err = 0;
	    }
	  break;

	default:
	  LOSE;
	}
    }

  __pthread_spin_unlock (&mutex->__lock);

  return err;
}
Example #21
/* Internal version of pthread_create.  See comment in
   pt-internal.h.  */
int
__pthread_create_internal (struct __pthread **thread,
			   const pthread_attr_t *attr,
			   void *(*start_routine)(void *), void *arg)
{
  int err;
  struct __pthread *pthread;
  const struct __pthread_attr *setup;
  sigset_t sigset;

  /* Allocate a new thread structure.  */
  err = __pthread_alloc (&pthread);
  if (err)
    goto failed;

  /* Use the default attributes if ATTR is NULL.  */
  setup = attr ? attr : &__pthread_default_attr;

  /* Initialize the thread state.  */
  pthread->state = (setup->detachstate == PTHREAD_CREATE_DETACHED
		    ? PTHREAD_DETACHED : PTHREAD_JOINABLE);

  /* If the user supplied a stack, it is not our responsibility to
     setup a stack guard.  */
  if (setup->stackaddr)
    pthread->guardsize = 0;
  else
    pthread->guardsize = (setup->guardsize <= setup->stacksize
			  ? setup->guardsize : setup->stacksize);

  /* Find a stack.  There are several scenarios: if a detached thread
     kills itself, it has no way to deallocate its stack, thus it
     leaves PTHREAD->stack set to true.  We try to reuse it here,
     however, if the user supplied a stack, we cannot use the old one.
     Right now, we simply deallocate it.  */
  if (pthread->stack)
    {
      if (setup->stackaddr != __pthread_default_attr.stackaddr)
	{
	  __pthread_stack_dealloc (pthread->stackaddr,
				   pthread->stacksize);
	  pthread->stackaddr = setup->stackaddr;
	  pthread->stacksize = setup->stacksize;
	}
    }
  else
    {
      err = __pthread_stack_alloc (&pthread->stackaddr,
				   setup->stacksize);
      if (err)
	goto failed_stack_alloc;

      pthread->stacksize = setup->stacksize;
      pthread->stack = 1;
    }

  /* Allocate the kernel thread and other required resources.  */
  err = __pthread_thread_alloc (pthread);
  if (err)
    goto failed_thread_alloc;

#ifdef ENABLE_TLS
  pthread->tcb = _dl_allocate_tls (NULL);
  if (!pthread->tcb)
    goto failed_thread_tls_alloc;
  pthread->tcb->tcb = pthread->tcb;
#endif /* ENABLE_TLS */

  /* And initialize the rest of the machine context.  This may include
     additional machine- and system-specific initializations that
     prove convenient.  */
  err = __pthread_setup (pthread, entry_point, start_routine, arg);
  if (err)
    goto failed_setup;

  /* Initialize the system-specific signal state for the new
     thread.  */
  err = __pthread_sigstate_init (pthread);
  if (err)
    goto failed_sigstate;

  /* Set the new thread's signal mask and set the pending signals to
     empty.  POSIX says: "The signal mask shall be inherited from the
     creating thread.  The set of signals pending for the new thread
     shall be empty."  If the currnet thread is not a pthread then we
     just inherit the process' sigmask.  */
  if (__pthread_num_threads == 1)
    err = sigprocmask (0, 0, &sigset);
  else
    err = __pthread_sigstate (_pthread_self (), 0, 0, &sigset, 0);
  assert_perror (err);

  err = __pthread_sigstate (pthread, SIG_SETMASK, &sigset, 0, 1);
  assert_perror (err);

  /* Increase the total number of threads.  We do this before actually
     starting the new thread, since the new thread might immediately
     call `pthread_exit' which decreases the number of threads and
     calls `exit' if the number of threads reaches zero.  Increasing
     the number of threads from within the new thread isn't an option
     since this thread might return and call `pthread_exit' before the
     new thread runs.  */
  __atomic_inc (&__pthread_total);

  /* Store a pointer to this thread in the thread ID lookup table.  We
     could use __thread_setid, however, we only lock for reading as no
     other thread should be using this entry (we also assume that the
     store is atomic).  */
  pthread_rwlock_rdlock (&__pthread_threads_lock);
  __pthread_threads[pthread->thread - 1] = pthread;
  pthread_rwlock_unlock (&__pthread_threads_lock);

  /* At this point it is possible to guess our pthread ID.  We have to
     make sure that all functions taking a pthread_t argument can
     handle the fact that this thread isn't really running yet.  */

  /* Schedule the new thread.  */
  err = __pthread_thread_start (pthread);
  if (err)
    goto failed_starting;

  /* At this point the new thread is up and running.  */

  *thread = pthread;

  return 0;

 failed_starting:
  __pthread_setid (pthread->thread, NULL);
  __atomic_dec (&__pthread_total);
 failed_sigstate:
  __pthread_sigstate_destroy (pthread);
 failed_setup:
#ifdef ENABLE_TLS
  _dl_deallocate_tls (pthread->tcb, 1);
 failed_thread_tls_alloc:
#endif /* ENABLE_TLS */
  __pthread_thread_dealloc (pthread);
  __pthread_thread_halt (pthread);
 failed_thread_alloc:
  __pthread_stack_dealloc (pthread->stackaddr, pthread->stacksize);
  pthread->stack = 0;
 failed_stack_alloc:
  __pthread_dealloc (pthread);
 failed:
  return err;
}
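Everything this internal routine consumes comes from the pthread_attr_t the caller filled in (or the built-in defaults when ATTR is NULL). A minimal sketch of the public entry point it backs, using an explicitly detached attribute; worker and spawn_detached_worker are placeholder names:

#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  /* ... thread body ... */
  return arg;
}

int
spawn_detached_worker (void *arg)
{
  pthread_attr_t attr;
  pthread_t tid;
  int err;

  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
  err = pthread_create (&tid, &attr, worker, arg);
  pthread_attr_destroy (&attr);

  if (err != 0)
    fprintf (stderr, "pthread_create failed: %d\n", err);
  return err;
}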