Example #1
int
sem_timedwait(sem_t *semp, const struct timespec *abstime)
{
	struct tib *tib = TIB_GET();
	pthread_t self;
	sem_t sem;
	int r;
	PREP_CANCEL_POINT(tib);

	/* lazily bootstrap the thread library on first use */
	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	/* reject a NULL or uninitialized semaphore handle */
	if (!semp || !(sem = *semp)) {
		errno = EINVAL;
		return (-1);
	}

	/* block, honoring deferred cancellation, until the count can be
	 * taken or the absolute timeout expires */
	ENTER_DELAYED_CANCEL_POINT(tib, self);
	r = _sem_wait(sem, 0, abstime, &self->delayed_cancel);
	LEAVE_CANCEL_POINT_INNER(tib, r);

	if (r) {
		/* the kernel reports a timeout as EWOULDBLOCK; POSIX
		 * callers expect ETIMEDOUT */
		errno = r == EWOULDBLOCK ? ETIMEDOUT : r;
		return (-1);
	}

	return (0);
}
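
Since this implements the standard sem_timedwait(3) interface, a caller
builds an absolute CLOCK_REALTIME deadline and checks errno for
ETIMEDOUT. A minimal, hypothetical caller sketch (wait_two_seconds is
not part of the source above):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

int
wait_two_seconds(sem_t *sem)
{
	struct timespec abstime;

	/* sem_timedwait() takes an absolute CLOCK_REALTIME deadline */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;

	if (sem_timedwait(sem, &abstime) == -1) {
		if (errno == ETIMEDOUT)
			return (0);	/* deadline passed without a post */
		perror("sem_timedwait");
		return (-1);
	}
	return (1);	/* decremented the semaphore */
}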
Example #2
int
sem_wait(sem_t *semp)
{
	struct tib *tib = TIB_GET();
	pthread_t self;
	sem_t sem;
	int r;
	PREP_CANCEL_POINT(tib);

	if (!_threads_ready)
		_rthread_init();
	self = tib->tib_thread;

	if (!semp || !(sem = *semp)) {
		errno = EINVAL;
		return (-1);
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);
	/* a NULL abstime makes _sem_wait() block indefinitely */
	r = _sem_wait(sem, 0, NULL, &self->delayed_cancel);
	LEAVE_CANCEL_POINT_INNER(tib, r);

	if (r) {
		errno = r;
		return (-1);
	}

	return (0);
}
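
sem_wait(3) is the untimed counterpart; paired with sem_post(3) it
makes a simple "work available" signal. A hypothetical, self-contained
sketch (link with -lpthread):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t work_ready;

static void *
worker(void *arg)
{
	int i;

	/* each sem_wait() consumes exactly one sem_post() from main() */
	for (i = 0; i < 3; i++) {
		sem_wait(&work_ready);
		printf("worker: got item %d\n", i);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	int i;

	sem_init(&work_ready, 0, 0);	/* initial count of zero */
	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < 3; i++)
		sem_post(&work_ready);	/* each post releases one waiter */
	pthread_join(&t, NULL);
	sem_destroy(&work_ready);
	return (0);
}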
Example #3
void
_thread_finalize(void)
{
	struct tib *tib = TIB_GET();

	/* pop each registered handler, run it with its saved argument,
	 * then free the node */
	while (tib->tib_atexit) {
		struct thread_atexit_fn *fnp = tib->tib_atexit;
		tib->tib_atexit = fnp->next;
		fnp->func(fnp->arg);
		free(fnp);
	}
}
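
_thread_finalize() drains the per-thread list of destructors that the C
runtime accumulates for thread-local cleanup (e.g. via
__cxa_thread_atexit()). A hedged sketch of what the registration side
could look like, reusing the internal types visible above
(register_thread_atexit and its lock-free body are hypothetical; the
real libc entry point and its locking may differ):

static int
register_thread_atexit(void (*func)(void *), void *arg)
{
	struct tib *tib = TIB_GET();
	struct thread_atexit_fn *fnp;

	/* hypothetical counterpart; field names match the struct
	 * walked by _thread_finalize() */
	if ((fnp = calloc(1, sizeof(*fnp))) == NULL)
		return (-1);
	fnp->func = func;
	fnp->arg = arg;
	/* push onto the head, so _thread_finalize() runs handlers in
	 * LIFO order */
	fnp->next = tib->tib_atexit;
	tib->tib_atexit = fnp;
	return (0);
}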
Example #4
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* deliberate misuse trap: fails under assert(), returns
		 * EINVAL when assertions are compiled out */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		/* __thrsleep() releases mutex->lock via its ticket and
		 * sleeps until woken or canceled */
		error = __thrsleep(self, 0 | _USING_TICKETS, NULL,
		    &mutex->lock.ticket, &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
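
As with any pthread_cond_wait(3) implementation, wakeups can happen for
reasons other than a matching signal, so callers must re-check their
predicate in a loop while holding the mutex. The canonical pattern,
with hypothetical shared state:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
static int queued;	/* hypothetical shared counter */

void
consume_one(void)
{
	pthread_mutex_lock(&lock);
	/* re-test the predicate after every wakeup */
	while (queued == 0)
		pthread_cond_wait(&nonempty, &lock);
	queued--;
	pthread_mutex_unlock(&lock);
}

void
produce_one(void)
{
	pthread_mutex_lock(&lock);
	queued++;
	pthread_cond_signal(&nonempty);	/* wake one waiter */
	pthread_mutex_unlock(&lock);
}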
Example #5
int
pthread_main_np(void)
{
	/* true before any thread is created, or when running on the
	 * program's initial stack */
	return (!_threads_ready ||
	    (TIB_GET()->tib_thread_flags & TIB_THREAD_INITIAL_STACK) ? 1 : 0);
}
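
pthread_main_np() lets library code refuse to run off the main thread.
A hypothetical guard (init_graphics is illustrative; the function is
declared in <pthread_np.h> on the BSDs, and availability elsewhere is
an assumption):

#include <pthread.h>
#include <pthread_np.h>
#include <stdio.h>

/* hypothetical main-thread-only initialization guard */
void
init_graphics(void)
{
	if (!pthread_main_np()) {
		fprintf(stderr, "init_graphics: must run on the main thread\n");
		return;
	}
	/* ... main-thread-only setup goes here ... */
}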