Example #1
/*
 * Internal implementation of semaphores
 */
int
_sem_wait(sem_t sem, int tryonly, const struct timespec *abstime,
    int *delayed_cancel)
{
	void *ident = (void *)&sem->waitcount;
	int r;

	if (sem->shared)
		ident = SHARED_IDENT;

	_spinlock(&sem->lock);
	if (sem->value) {
		sem->value--;
		r = 0;
	} else if (tryonly) {
		r = EAGAIN;
	} else {
		sem->waitcount++;
		do {
			r = __thrsleep(ident, CLOCK_REALTIME |
			    _USING_TICKETS, abstime, &sem->lock.ticket,
			    delayed_cancel);
			_spinlock(&sem->lock);
			/* ignore interruptions other than cancelation */
			if (r == EINTR && (delayed_cancel == NULL ||
			    *delayed_cancel == 0))
				r = 0;
		} while (r == 0 && sem->value == 0);
		sem->waitcount--;
		if (r == 0)
			sem->value--;
	}
	_spinunlock(&sem->lock);
	return (r);
}
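This internal helper backs the public sem_wait()/sem_trywait()/sem_timedwait() entry points: tryonly selects the non-blocking path and abstime the timed one. As a self-contained usage sketch of that public API (the producer/ready names are illustrative, not part of the implementation above), note that sem_trywait() surfaces the internal EAGAIN through errno:

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t ready;		/* counts items handed to the consumer */

static void *
producer(void *arg)
{
	sem_post(&ready);	/* value++, wakes one sleeping waiter */
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	sem_init(&ready, 0, 0);		/* not shared, initial value 0 */

	/* value is 0, so the non-blocking form fails immediately */
	if (sem_trywait(&ready) == -1 && errno == EAGAIN)
		printf("trywait: would block\n");

	pthread_create(&t, NULL, producer, NULL);
	sem_wait(&ready);		/* blocks until the post above */
	printf("got the token\n");

	pthread_join(t, NULL);
	sem_destroy(&ready);
	return (0);
}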
Example #2
/*
 * Internal implementation of semaphores
 */
int
_sem_wait(sem_t sem, int tryonly, const struct timespec *abstime,
    int *delayed_cancel)
{
	int r;

	_spinlock(&sem->lock);
	if (sem->value) {
		sem->value--;
		r = 0;
	} else if (tryonly) {
		r = EAGAIN;
	} else {
		sem->waitcount++;
		do {
			r = __thrsleep(&sem->waitcount, CLOCK_REALTIME,
			    abstime, &sem->lock, delayed_cancel);
			_spinlock(&sem->lock);
			/* ignore interruptions other than cancelation */
			if (r == EINTR && (delayed_cancel == NULL ||
			    *delayed_cancel == 0))
				r = 0;
		} while (r == 0 && sem->value == 0);
		sem->waitcount--;
		if (r == 0)
			sem->value--;
	}
	_spinunlock(&sem->lock);
	return (r);
}
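This older variant predates the ticket-lock handoff: __thrsleep() takes the spinlock itself rather than &sem->lock.ticket, and there is no SHARED_IDENT case for process-shared semaphores. Both versions follow the same discipline, though: re-acquire the lock after every wakeup and re-test value in a loop, because __thrsleep() can return for reasons other than a post. A rough analogy of that discipline in portable C11 threads (sem_wait_sketch/sem_post_sketch are hypothetical names; cnd_wait() likewise admits spurious wakeups):

#include <threads.h>

struct sem {
	mtx_t	lock;
	cnd_t	wake;
	int	value;
};

static int
sem_init_sketch(struct sem *s, int value)
{
	s->value = value;
	if (mtx_init(&s->lock, mtx_plain) != thrd_success)
		return (-1);
	if (cnd_init(&s->wake) != thrd_success)
		return (-1);
	return (0);
}

/* A rough analogy only: the condition variable hides the explicit
 * __thrsleep()/spinlock handoff the code above performs by hand. */
static void
sem_wait_sketch(struct sem *s)
{
	mtx_lock(&s->lock);
	while (s->value == 0)		/* re-test: wakeups may be spurious */
		cnd_wait(&s->wake, &s->lock);
	s->value--;
	mtx_unlock(&s->lock);
}

static void
sem_post_sketch(struct sem *s)
{
	mtx_lock(&s->lock);
	s->value++;
	cnd_signal(&s->wake);
	mtx_unlock(&s->lock);
}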
Example #3
void
(flockfile)(FILE * fp)
{
    int	idx = file_idx(fp);
    struct	file_lock	*p;
    pthread_t	self = pthread_self();

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /* Get a pointer to any existing lock for the file: */
    if ((p = find_lock(idx, fp)) == NULL) {
        /*
         * The file is not locked, so this thread can
         * grab the lock:
         */
        do_lock(idx, fp);

    /*
     * The file is already locked, so check if the
     * running thread is the owner:
     */
    } else if (p->owner == self) {
        /*
         * The running thread is already the
         * owner, so increment the count of
         * the number of times it has locked
         * the file:
         */
        p->count++;
    } else {
        /*
         * The file is locked for another thread.
         * Append this thread to the queue of
         * threads waiting on the lock.
         */
        TAILQ_INSERT_TAIL(&p->lockers, self, waiting);
        while (p->owner != self) {
            __thrsleep(self, 0 | _USING_TICKETS, NULL,
                       &hash_lock.ticket, NULL);
            _spinlock(&hash_lock);
        }
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);
}
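flockfile() pairs with funlockfile(), and the p->count++ branch above makes the lock recursive, so a thread that already owns the FILE may re-enter. The usual application is keeping a multi-part write atomic against other threads; log_pair below is a hypothetical helper showing the pattern:

#include <stdio.h>

/* Emit a two-part record atomically with respect to other threads
 * writing to the same FILE.  The lock nests, so callers that already
 * hold it may call this too. */
static void
log_pair(FILE *fp, const char *k, const char *v)
{
	flockfile(fp);
	fputs(k, fp);		/* the unlocked stdio forms, e.g. */
	fputc('=', fp);		/* putc_unlocked(c, fp), also apply here */
	fputs(v, fp);
	fputc('\n', fp);
	funlockfile(fp);
}

int
main(void)
{
	log_pair(stdout, "status", "ok");
	return (0);
}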
Example #4
static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE) {
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME |
			    _USING_TICKETS, abstime,
			    &mutex->lock.ticket, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME | _USING_TICKETS,
			    abstime, &mutex->lock.ticket, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}
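The branches correspond one-to-one with the public mutex types: relocking an ERRORCHECK mutex fails with EDEADLK, a RECURSIVE mutex counts relocks (EAGAIN only at INT_MAX), trylock on a busy mutex reports EBUSY, and the STRICT_NP/NORMAL self-deadlock path blocks or aborts. A small demonstration of the relock outcomes through the public API (try_type is a hypothetical helper):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static void
try_type(int type, const char *name)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;
	int r;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, type);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	r = pthread_mutex_lock(&m);	/* relock while already the owner */
	printf("%s relock -> %d (%s)\n", name, r,
	    r == EDEADLK ? "EDEADLK" : r == 0 ? "counted" : "?");

	/* unlock once per successful lock */
	pthread_mutex_unlock(&m);
	if (r == 0)
		pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	pthread_mutexattr_destroy(&attr);
}

int
main(void)
{
	try_type(PTHREAD_MUTEX_ERRORCHECK, "errorcheck");
	try_type(PTHREAD_MUTEX_RECURSIVE, "recursive");
	return (0);
}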
Example #5
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* bound to a different mutex: the assert aborts in
		 * debug builds; otherwise fail the call */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, 0 | _USING_TICKETS, NULL,
		    &mutex->lock.ticket, &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
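The contract enforced above is the standard one: the caller must hold the mutex on entry and holds it again on every return, and since a waiter can be woken without its predicate becoming true (requeueing, signals), callers must re-test in a loop. Minimal caller-side usage matching that contract:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
static int items;

static void *
producer(void *arg)
{
	pthread_mutex_lock(&lock);
	items++;
	pthread_cond_signal(&nonempty);	/* moves one waiter toward the mutex */
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);

	pthread_mutex_lock(&lock);
	while (items == 0)		/* loop: wakeups may be spurious */
		pthread_cond_wait(&nonempty, &lock);
	items--;
	pthread_mutex_unlock(&lock);

	printf("consumed one item\n");
	pthread_join(t, NULL);
	return (0);
}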
Example #6
int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
    const struct timespec *abstime)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	pthread_t self = pthread_self();
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int rv = 0;
	int error;

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	_enter_delayed_cancel(self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		/* bound to a different mutex: the assert aborts in
		 * debug builds; otherwise fail the call */
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		_leave_delayed_cancel(self, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, cond->clock | _USING_TICKETS, abstime,
		    &mutex->lock.ticket, &self->delayed_cancel);

		/*
		 * If abstime == NULL, then we're definitely waiting
		 * on the mutex instead of the condvar, and are
		 * just waiting for mutex ownership, regardless of
		 * why we woke up.
		 */
		if (abstime == NULL) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && !IS_CANCELED(self)) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup, timeout, and cancellation) all mean that
		 * we won't be staying in the condvar queue and
		 * we'll no longer time out or be cancelable.
		 */
		abstime = NULL;
		_leave_delayed_cancel(self, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if timeout or canceled, make note of that */
		if (error == EWOULDBLOCK)
			rv = ETIMEDOUT;
		else if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	_leave_delayed_cancel(self, canceled);

	return (rv);
}
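abstime is an absolute deadline on the condvar's clock rather than a relative delay, and once any wakeup has moved the thread off the condvar queue the code clears abstime so the remaining wait is purely for mutex ownership. On the caller side the deadline is typically built from clock_gettime(); this self-contained sketch simply times out after 100ms, since nothing ever signals:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int done;

int
main(void)
{
	struct timespec deadline;
	int r = 0;

	/* absolute deadline: now + 100ms on the default condvar clock */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += 100 * 1000000;
	if (deadline.tv_nsec >= 1000000000) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000;
	}

	pthread_mutex_lock(&lock);
	while (!done && r != ETIMEDOUT)	/* re-test predicate every wakeup */
		r = pthread_cond_timedwait(&cv, &lock, &deadline);
	pthread_mutex_unlock(&lock);	/* owned again on every return path */

	printf("%s\n", r == ETIMEDOUT ? "timed out" : "signaled");
	return (0);
}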