Example #1
/*
 * Internal implementation of semaphores
 */
int
_sem_wait(sem_t sem, int tryonly, const struct timespec *abstime,
    int *delayed_cancel)
{
	void *ident = (void *)&sem->waitcount;
	int r;

	if (sem->shared)
		ident = SHARED_IDENT;

	_spinlock(&sem->lock);
	if (sem->value) {
		sem->value--;
		r = 0;
	} else if (tryonly) {
		r = EAGAIN;
	} else {
		sem->waitcount++;
		do {
			r = __thrsleep(ident, CLOCK_REALTIME |
			    _USING_TICKETS, abstime, &sem->lock.ticket,
			    delayed_cancel);
			_spinlock(&sem->lock);
			/* ignore interruptions other than cancelation */
			if (r == EINTR && (delayed_cancel == NULL ||
			    *delayed_cancel == 0))
				r = 0;
		} while (r == 0 && sem->value == 0);
		sem->waitcount--;
		if (r == 0)
			sem->value--;
	}
	_spinunlock(&sem->lock);
	return (r);
}
Example #2
/*
 * Internal implementation of semaphores
 */
int
_sem_wait(sem_t sem, int tryonly, const struct timespec *abstime,
    int *delayed_cancel)
{
	int r;

	_spinlock(&sem->lock);
	if (sem->value) {
		sem->value--;
		r = 0;
	} else if (tryonly) {
		r = EAGAIN;
	} else {
		sem->waitcount++;
		do {
			r = __thrsleep(&sem->waitcount, CLOCK_REALTIME,
			    abstime, &sem->lock, delayed_cancel);
			_spinlock(&sem->lock);
			/* ignore interruptions other than cancelation */
			if (r == EINTR && (delayed_cancel == NULL ||
			    *delayed_cancel == 0))
				r = 0;
		} while (r == 0 && sem->value == 0);
		sem->waitcount--;
		if (r == 0)
			sem->value--;
	}
	_spinunlock(&sem->lock);
	return (r);
}
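The `abstime` and `tryonly` parameters of `_sem_wait` are what the public `sem_wait`, `sem_trywait`, and `sem_timedwait` entry points differ in. A minimal caller-side sketch using only the standard POSIX semaphore API (illustrative; not part of the excerpted source):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	sem_t sem;
	struct timespec abstime;

	if (sem_init(&sem, 0, 0) == -1)		/* pshared=0, initial value 0 */
		return (1);

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 1;			/* absolute deadline, 1s out */

	/* nothing has posted, so this should fail with ETIMEDOUT */
	if (sem_timedwait(&sem, &abstime) == -1 && errno == ETIMEDOUT)
		printf("timed out as expected\n");

	/* sem_trywait() is the tryonly case: EAGAIN instead of blocking */
	if (sem_trywait(&sem) == -1 && errno == EAGAIN)
		printf("try failed as expected\n");

	sem_destroy(&sem);
	return (0);
}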
Example #3
static void
_rthread_clearflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags &= ~flag;
	_spinunlock(&thread->flags_lock);
}
Example #4
void free_recvframe_queue(_queue *pframequeue, _queue *pfree_recv_queue)
{
	union recv_frame *precvframe;
	_list *plist, *phead;

_func_enter_;
	_spinlock(&pframequeue->lock);

	phead = get_list_head(pframequeue);
	plist = get_next(phead);

	while (end_of_queue_search(phead, plist) == _FALSE) {
		precvframe = LIST_CONTAINOR(plist, union recv_frame, u);

		plist = get_next(plist);

		/* list_delete(&precvframe->u.hdr.list) is done in free_recvframe() */
		free_recvframe(precvframe, pfree_recv_queue);
	}

	_spinunlock(&pframequeue->lock);

_func_exit_;
}
Example #5
static void
_rthread_setflag(pthread_t thread, int flag)
{
	_spinlock(&thread->flags_lock);
	thread->flags |= flag;
	_spinunlock(&thread->flags_lock);
}
Example #6
void
(flockfile)(FILE * fp)
{
    int	idx = file_idx(fp);
    struct	file_lock	*p;
    pthread_t	self = pthread_self();

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /* Get a pointer to any existing lock for the file: */
    if ((p = find_lock(idx, fp)) == NULL) {
        /*
         * The file is not locked, so this thread can
         * grab the lock:
         */
        do_lock(idx, fp);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
    } else if (p->owner == self) {
        /*
         * The running thread is already the
         * owner, so increment the count of
         * the number of times it has locked
         * the file:
         */
        p->count++;
    } else {
        /*
         * The file is locked for another thread.
         * Append this thread to the queue of
         * threads waiting on the lock.
         */
        TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
        while (p->owner != self) {
            __thrsleep(self, 0 | _USING_TICKETS, NULL,
                       &hash_lock.ticket, NULL);
            _spinlock(&hash_lock);
        }
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);
}
Example #7
int
pthread_cond_signal(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	assert(thread->blocking_cond == cond);
	TAILQ_REMOVE(&cond->waiters, thread, waiting);
	thread->blocking_cond = NULL;

	mutex = cond->mutex;
	assert(mutex != NULL);
	if (TAILQ_EMPTY(&cond->waiters))
		cond->mutex = NULL;

	/* link locks to prevent race with timedwait */
	_spinlock(&mutex->lock);
	_spinunlock(&cond->lock);

	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	if (wakeup)
		mutex->owner = thread;
	else
		TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
	_spinunlock(&mutex->lock);
	if (wakeup)
		__thrwakeup(thread, 1);

	return (0);
}
Example #8
/*
 * for ld.so
 */
void
_thread_dl_lock(int t)
{
	if (t)
		_spinunlock(&dl_lock);
	else
		_spinlock(&dl_lock);
}
Example #9
void
_rthread_bind_lock(int what)
{
	static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;

	if (what == 0)
		_spinlock(&lock);
	else
		_spinunlock(&lock);
}
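`_rthread_bind_lock` packs lock and unlock into a single callback so ld.so can serialize lazy binding without knowing anything about the lock's type. A sketch of the same single-entry-point shape, rebuilt on a portable pthread mutex so it compiles outside librthread (the name `example_bind_lock` is illustrative, not from the source):

#include <pthread.h>

/* what == 0 locks, anything else unlocks; same dispatch as above */
static void
example_bind_lock(int what)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	if (what == 0)
		pthread_mutex_lock(&lock);
	else
		pthread_mutex_unlock(&lock);
}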
Example #10
void
_thread_tag_lock(void **tag)
{
	struct thread_tag *t;

	if(*tag == nil)
		_thread_tag_init(tag);
	t = *tag;
	_spinlock(&t->l);
}
Example #11
/*
 * compat debug stuff
 */
void
_thread_dump_info(void)
{
	pthread_t thread;

	_spinlock(&_thread_lock);
	LIST_FOREACH(thread, &_thread_list, threads)
		printf("thread %d flags %d name %s\n",
		    thread->tid, thread->flags, thread->name);
	_spinunlock(&_thread_lock);
}
Example #12
static void
_rthread_start(void *v)
{
	pthread_t thread = v;
	void *retval;

	/* ensure parent returns from rfork, sets up tid */
	_spinlock(&_thread_lock);
	_spinunlock(&_thread_lock);
	retval = thread->fn(thread->arg);
	pthread_exit(retval);
}
Example #13
int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
	pthread_t self = pthread_self();
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
	    (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
		return(0);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
		    mutex->type == PTHREAD_MUTEX_RECURSIVE)
			return (EPERM);
		else {
			/*
			 * For mutex type NORMAL our undefined behavior for
			 * unlocking an unlocked mutex is to succeed without
			 * error.  All other undefined behaviors are to
			 * abort() immediately.
			 */
			if (mutex->owner == NULL &&
			    mutex->type == PTHREAD_MUTEX_NORMAL)
				return (0);
			else
				abort();
		}
	}

	if (--mutex->count == 0) {
		pthread_t next;

		_spinlock(&mutex->lock);
		mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
		if (next != NULL)
			TAILQ_REMOVE(&mutex->lockers, next, waiting);
		_spinunlock(&mutex->lock);
		if (next != NULL)
			__thrwakeup(next, 1);
	}

	return (0);
}
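For ERRORCHECK and RECURSIVE mutexes the unlock path above reports a non-owner unlock with EPERM rather than invoking undefined behavior. A small, runnable demonstration using the portable mutex-attribute API (illustrative; not from the excerpted source):

#include <assert.h>
#include <errno.h>
#include <pthread.h>

int
main(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	/* unlocking a mutex we do not own is reported, not undefined */
	assert(pthread_mutex_unlock(&m) == EPERM);

	pthread_mutex_destroy(&m);
	return (0);
}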
Example #14
/* always increment count */
int
_sem_post(sem_t sem)
{
	int rv = 0;

	_spinlock(&sem->lock);
	sem->value++;
	if (sem->waitcount) {
		__thrwakeup(&sem->waitcount, 1);
		rv = 1;
	}
	_spinunlock(&sem->lock);
	return (rv);
}
Example #15
/*
 * real pthread functions
 */
pthread_t
pthread_self(void)
{
	pthread_t thread;

	if (!_threads_ready)
		if (_rthread_init())
			return (NULL);

	_spinlock(&_thread_lock);
	thread = _rthread_findself();
	_spinunlock(&_thread_lock);

	return (thread);
}
Example #16
void
_rthread_free_stack(struct stack *stack)
{
	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
	    stack->guardsize == _rthread_attr_default.guard_size) {
		_spinlock(&def_stacks_lock);
		SLIST_INSERT_HEAD(&def_stacks, stack, link);
		_spinunlock(&def_stacks_lock);
	} else {
		/* unmap the storage unless it was application allocated */
		if (stack->guardsize != 1)
			munmap(stack->base, stack->len);
		free(stack);
	}
}
Example #17
int
(ftrylockfile)(FILE * fp)
{
    int	ret = -1;
    int	idx = file_idx(fp);
    struct	file_lock	*p;

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /* Get a pointer to any existing lock for the file: */
    if ((p = find_lock(idx, fp)) == NULL) {
        /*
         * The file is not locked, so this thread can
         * grab the lock:
         */
        p = do_lock(idx, fp);

        /*
         * The file is already locked, so check if the
         * running thread is the owner:
         */
    } else if (p->owner == pthread_self()) {
        /*
         * The running thread is already the
         * owner, so increment the count of
         * the number of times it has locked
         * the file:
         */
        p->count++;
    } else {
        /*
         * The file is locked for another thread,
         * so this try fails.
         */
        p = NULL;
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);

    /* Check if the lock was obtained: */
    if (p != NULL)
        /* Return success: */
        ret = 0;

    return (ret);
}
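`ftrylockfile` returns 0 only when the lock was obtained, mirroring the `p != NULL` check above. Typical caller-side usage of the standard stdio locking trio (illustrative; not from the excerpted source):

#include <stdio.h>

int
main(void)
{
	/* group several writes into one atomic unit w.r.t. other threads */
	flockfile(stdout);
	fputs("hello ", stdout);
	fputs("world\n", stdout);
	funlockfile(stdout);

	/* non-blocking variant: skip the work if another thread holds it */
	if (ftrylockfile(stdout) == 0) {
		fputs("got the lock without blocking\n", stdout);
		funlockfile(stdout);
	}
	return (0);
}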
Example #18
int
sem_getvalue(sem_t *semp, int *sval)
{
	sem_t sem;

	if (!semp || !(sem = *semp)) {
		errno = EINVAL;
		return (-1);
	}

	_spinlock(&sem->lock);
	*sval = sem->value;
	_spinunlock(&sem->lock);

	return (0);
}
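Putting the semaphore pieces together: `_sem_post` increments the value, `_sem_wait` decrements it, and `sem_getvalue` snapshots it under the lock. A short, runnable sequence using the public API (illustrative; not from the excerpted source):

#include <semaphore.h>
#include <stdio.h>

int
main(void)
{
	sem_t sem;
	int val;

	sem_init(&sem, 0, 0);
	sem_post(&sem);			/* value 0 -> 1 */
	sem_post(&sem);			/* value 1 -> 2 */

	sem_getvalue(&sem, &val);
	printf("value after two posts: %d\n", val);	/* prints 2 */

	sem_wait(&sem);			/* value 2 -> 1, does not block */
	sem_getvalue(&sem, &val);
	printf("value after one wait: %d\n", val);	/* prints 1 */

	sem_destroy(&sem);
	return (0);
}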
Example #19
sint _enqueue_cmd(_queue *queue, struct cmd_obj *obj)
{
_func_enter_;
	if (obj == NULL)
		goto exit;

	_spinlock(&queue->lock);

	list_insert_tail(&obj->list, &queue->queue);

	_spinunlock(&queue->lock);
exit:
_func_exit_;
	return _SUCCESS;
}
Example #20
/*
 * for libc
 */
static void
_thread_tag_init(void **tag)
{
	struct thread_tag *t;

	_spinlock(&tag_lock);
	if(*tag == nil) {
		t = malloc(sizeof (*t));
		if(t != nil) {
			memset(&t->l, 0, sizeof(t->l));
			t->key = nextkey++;
			*tag = t;
		}
	}
	_spinunlock(&tag_lock);
}
Example #21
int
pthread_spin_lock(pthread_spinlock_t *lock)
{
	pthread_t self = pthread_self();
	pthread_spinlock_t l;

	if (lock == NULL || *lock == NULL)
		return (EINVAL);

	l = *lock;

	if (l->owner == self)
		return (EDEADLK);

	_spinlock(&l->lock);
	l->owner = self;
	return (0);
}
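Note that `pthread_spin_lock` spins rather than sleeps, so critical sections should stay short. A minimal two-thread usage sketch with the standard spinlock API (illustrative; not from the excerpted source):

#include <pthread.h>

static pthread_spinlock_t spin;
static long counter;

static void *
worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		pthread_spin_lock(&spin);
		counter++;		/* keep the critical section tiny */
		pthread_spin_unlock(&spin);
	}
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_spin_init(&spin, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t, NULL, worker, NULL);
	worker(NULL);			/* main thread contends too */
	pthread_join(t, NULL);
	pthread_spin_destroy(&spin);
	return (counter == 200000 ? 0 : 1);
}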
Example #22
struct cmd_obj *_dequeue_cmd(_queue *queue)
{
	struct cmd_obj *obj;
_func_enter_;
	_spinlock(&queue->lock);

	if (is_list_empty(&queue->queue))
		obj = NULL;
	else {
		obj = LIST_CONTAINOR(get_next(&queue->queue), struct cmd_obj, list);
		list_delete(&obj->list);
	}
	_spinunlock(&queue->lock);
_func_exit_;
	return obj;
}
Example #23
/* always increment count */
int
_sem_post(sem_t sem)
{
	void *ident = (void *)&sem->waitcount;
	int rv = 0;

	if (sem->shared)
		ident = SHARED_IDENT;

	_spinlock(&sem->lock);
	sem->value++;
	if (sem->waitcount) {
		__thrwakeup(ident, 1);
		rv = 1;
	}
	_spinunlock(&sem->lock);
	return (rv);
}
Example #24
int
pthread_detach(pthread_t thread)
{
	int rc = 0;

	_spinlock(&thread->flags_lock);
	if (thread->flags & THREAD_DETACHED) {
		rc = EINVAL;
		_spinunlock(&thread->flags_lock);
	} else if (thread->flags & THREAD_DONE) {
		_spinunlock(&thread->flags_lock);
		_rthread_free(thread);
	} else {
		thread->flags |= THREAD_DETACHED;
		_spinunlock(&thread->flags_lock);
	}
	_rthread_reaper();
	return (rc);
}
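Once detached, a thread's resources are reclaimed when it exits and it must never be joined. A small usage sketch (illustrative; the `sleep` is only a crude way to let the detached thread finish before the process exits):

#include <pthread.h>
#include <unistd.h>

static void *
background(void *arg)
{
	/* do work; on exit, resources are reclaimed automatically */
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, background, NULL);
	pthread_detach(t);	/* no pthread_join() allowed after this */

	sleep(1);		/* crude: give the detached thread time */
	return (0);
}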
Example #25
static void *
_thread_tag_lookup(struct thread_tag *tag, int size)
{
	struct thread_tag *t;
	void *p;

	_spinlock(&tag->l);
	for(t = thread_tag_store; t != nil; t = t->next)
		if(t->key == tag->key)
			break;
	if(t == nil) {
		p = malloc(size);
		if(p == nil) {
			_spinunlock(&tag->l);
			return nil;
		}
		_thread_tag_insert(tag, p);
	}
	_spinunlock(&tag->l);
	return tag->data;
}
Example #26
void
(funlockfile)(FILE * fp)
{
    int	idx = file_idx(fp);
    struct	file_lock	*p;

    /* Lock the hash table: */
    _spinlock(&hash_lock);

    /*
     * Get a pointer to the lock for the file and check that
     * the running thread is the one with the lock:
     */
    if ((p = find_lock(idx, fp)) != NULL && p->owner == pthread_self()) {
        /*
         * Check if this thread has locked the FILE
         * more than once:
         */
        if (--p->count == 0) {
            /* Get the new owner of the lock: */
            if ((p->owner = TAILQ_FIRST(&p->lockers)) != NULL) {
                /* Pop the thread off the queue: */
                TAILQ_REMOVE(&p->lockers,p->owner,waiting);

                /*
                 * This is the first lock for the new
                 * owner:
                 */
                p->count = 1;

                __thrwakeup(p->owner, 1);
            }
        }
    }

    /* Unlock the hash table: */
    _spinunlock(&hash_lock);
}
Example #27
void
pthread_exit(void *retval)
{
	struct rthread_cleanup_fn *clfn;
	pid_t tid;
	struct stack *stack;
	pthread_t thread = pthread_self();

	thread->retval = retval;
	
	for (clfn = thread->cleanup_fns; clfn; ) {
		struct rthread_cleanup_fn *oclfn = clfn;
		clfn = clfn->next;
		oclfn->fn(oclfn->arg);
		free(oclfn);
	}
	_rthread_tls_destructors(thread);
	_spinlock(&_thread_lock);
	LIST_REMOVE(thread, threads);
	_spinunlock(&_thread_lock);

	_sem_post(&thread->donesem);

	stack = thread->stack;
	tid = thread->tid;
	if (thread->flags & THREAD_DETACHED)
		_rthread_free(thread);
	else
		_rthread_setflag(thread, THREAD_DONE);

	if (tid != _initial_thread.tid)
		_rthread_add_to_reaper(tid, stack);

	_rthread_reaper();
	threxit(0);
	for(;;);
}
Example #28
static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
    const struct timespec *abstime)
{
	struct pthread_mutex *mutex;
	pthread_t self = pthread_self();
	int ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization. Note: _thread_mutex_lock() in libc requires
	 * _rthread_mutex_lock() to perform the mutex init when *mutexp
	 * is NULL.
	 */
	if (*mutexp == NULL) {
		_spinlock(&static_init_lock);
		if (*mutexp == NULL)
			ret = pthread_mutex_init(mutexp, NULL);
		_spinunlock(&static_init_lock);
		if (ret != 0)
			return (EINVAL);
	}
	mutex = (struct pthread_mutex *)*mutexp;

	_rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
	_spinlock(&mutex->lock);
	if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
		assert(mutex->count == 0);
		mutex->owner = self;
	} else if (mutex->owner == self) {
		assert(mutex->count > 0);

		/* already owner?  handle recursive behavior */
		if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
		{
			if (trywait ||
			    mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
				_spinunlock(&mutex->lock);
				return (trywait ? EBUSY : EDEADLK);
			}

			/* self-deadlock is disallowed by strict */
			if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
			    abstime == NULL)
				abort();

			/* self-deadlock, possibly until timeout */
			while (__thrsleep(self, CLOCK_REALTIME |
			    _USING_TICKETS, abstime,
			    &mutex->lock.ticket, NULL) != EWOULDBLOCK)
				_spinlock(&mutex->lock);
			return (ETIMEDOUT);
		}
		if (mutex->count == INT_MAX) {
			_spinunlock(&mutex->lock);
			return (EAGAIN);
		}
	} else if (trywait) {
		/* try failed */
		_spinunlock(&mutex->lock);
		return (EBUSY);
	} else {
		/* add to the wait queue and block until at the head */
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
		while (mutex->owner != self) {
			ret = __thrsleep(self, CLOCK_REALTIME | _USING_TICKETS,
			    abstime, &mutex->lock.ticket, NULL);
			_spinlock(&mutex->lock);
			assert(mutex->owner != NULL);
			if (ret == EWOULDBLOCK) {
				if (mutex->owner == self)
					break;
				TAILQ_REMOVE(&mutex->lockers, self, waiting);
				_spinunlock(&mutex->lock);
				return (ETIMEDOUT);
			}
		}
	}

	mutex->count++;
	_spinunlock(&mutex->lock);

	return (0);
}
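The recursive branch above lets the owning thread relock, bumping `mutex->count` until INT_MAX. A runnable demonstration with the portable attribute API (illustrative; not from the excerpted source):

#include <assert.h>
#include <pthread.h>

int
main(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	/* the owner may relock; the count rises to 2, then back down */
	assert(pthread_mutex_lock(&m) == 0);
	assert(pthread_mutex_lock(&m) == 0);
	assert(pthread_mutex_unlock(&m) == 0);
	assert(pthread_mutex_unlock(&m) == 0);

	pthread_mutex_destroy(&m);
	return (0);
}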
Example #29
int
pthread_cond_broadcast(pthread_cond_t *condp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex;
	pthread_t thread;
	pthread_t p;
	int wakeup;

	/* uninitialized?  Then there's obviously no one waiting! */
	if (!*condp)
		return 0;

	cond = *condp;
	_rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
	    (void *)cond, (void *)cond->mutex);
	_spinlock(&cond->lock);
	thread = TAILQ_FIRST(&cond->waiters);
	if (thread == NULL) {
		assert(cond->mutex == NULL);
		_spinunlock(&cond->lock);
		return (0);
	}

	mutex = cond->mutex;
	assert(mutex != NULL);

	/* walk the list, clearing the "blocked on condvar" pointer */
	p = thread;
	do
		p->blocking_cond = NULL;
	while ((p = TAILQ_NEXT(p, waiting)) != NULL);

	/*
	 * We want to transfer all the threads from the condvar's list
	 * to the mutex's list.  The TAILQ_* macros don't let us do that
	 * efficiently, so this is direct list surgery.  Pay attention!
	 */

	/* 1) attach the first thread to the end of the mutex's list */
	_spinlock(&mutex->lock);
	wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
	thread->waiting.tqe_prev = mutex->lockers.tqh_last;
	*(mutex->lockers.tqh_last) = thread;

	/* 2) fix up the end pointer for the mutex's list */
	mutex->lockers.tqh_last = cond->waiters.tqh_last;

	if (wakeup) {
		TAILQ_REMOVE(&mutex->lockers, thread, waiting);
		mutex->owner = thread;
		_spinunlock(&mutex->lock);
		__thrwakeup(thread, 1);
	} else
		_spinunlock(&mutex->lock);

	/* 3) reset the condvar's list and mutex pointer */
	TAILQ_INIT(&cond->waiters);
	assert(cond->mutex != NULL);
	cond->mutex = NULL;
	_spinunlock(&cond->lock);

	return (0);
}
Example #30
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
	pthread_cond_t cond;
	struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
	struct tib *tib = TIB_GET();
	pthread_t self = tib->tib_thread;
	pthread_t next;
	int mutex_count;
	int canceled = 0;
	int error;
	PREP_CANCEL_POINT(tib);

	if (!*condp)
		if ((error = pthread_cond_init(condp, NULL)))
			return (error);
	cond = *condp;
	_rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
	    (void *)cond, (void *)mutex);

	if (mutex == NULL)
#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
		return (EPERM);
#else
		abort();
#endif

	if (mutex->owner != self) {
		if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
			return (EPERM);
		else
			abort();
	}

	ENTER_DELAYED_CANCEL_POINT(tib, self);

	_spinlock(&cond->lock);

	/* mark the condvar as being associated with this mutex */
	if (cond->mutex == NULL) {
		cond->mutex = mutex;
		assert(TAILQ_EMPTY(&cond->waiters));
	} else if (cond->mutex != mutex) {
		assert(cond->mutex == mutex);
		_spinunlock(&cond->lock);
		LEAVE_CANCEL_POINT_INNER(tib, 1);
		return (EINVAL);
	} else
		assert(! TAILQ_EMPTY(&cond->waiters));

	/* snag the count in case this is a recursive mutex */
	mutex_count = mutex->count;

	/* transfer from the mutex queue to the condvar queue */
	_spinlock(&mutex->lock);
	self->blocking_cond = cond;
	TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
	_spinunlock(&cond->lock);

	/* wake the next guy blocked on the mutex */
	mutex->count = 0;
	mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
	if (next != NULL) {
		TAILQ_REMOVE(&mutex->lockers, next, waiting);
		__thrwakeup(next, 1);
	}

	/* wait until we're the owner of the mutex again */
	while (mutex->owner != self) {
		error = __thrsleep(self, 0 | _USING_TICKETS, NULL,
		    &mutex->lock.ticket, &self->delayed_cancel);

		/*
		 * If we took a normal signal (not from
		 * cancellation) then we should just go back to
		 * sleep without changing state (timeouts, etc).
		 */
		if (error == EINTR && (tib->tib_canceled == 0 ||
		    (tib->tib_cantcancel & CANCEL_DISABLED))) {
			_spinlock(&mutex->lock);
			continue;
		}

		/*
		 * The remaining reasons for waking up (normal
		 * wakeup and cancellation) all mean that we won't
		 * be staying in the condvar queue and we'll no
		 * longer be cancelable.
		 */
		LEAVE_CANCEL_POINT_INNER(tib, 0);

		/*
		 * If we're no longer in the condvar's queue then
		 * we're just waiting for mutex ownership.  Need
		 * cond->lock here to prevent race with cond_signal().
		 */
		_spinlock(&cond->lock);
		if (self->blocking_cond == NULL) {
			_spinunlock(&cond->lock);
			_spinlock(&mutex->lock);
			continue;
		}
		assert(self->blocking_cond == cond);

		/* if canceled, make note of that */
		if (error == EINTR)
			canceled = 1;

		/* transfer between the queues */
		TAILQ_REMOVE(&cond->waiters, self, waiting);
		assert(mutex == cond->mutex);
		if (TAILQ_EMPTY(&cond->waiters))
			cond->mutex = NULL;
		self->blocking_cond = NULL;
		_spinunlock(&cond->lock);
		_spinlock(&mutex->lock);

		/* mutex unlocked right now? */
		if (mutex->owner == NULL &&
		    TAILQ_EMPTY(&mutex->lockers)) {
			assert(mutex->count == 0);
			mutex->owner = self;
			break;
		}
		TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
	}

	/* restore the mutex's count */
	mutex->count = mutex_count;
	_spinunlock(&mutex->lock);

	LEAVE_CANCEL_POINT_INNER(tib, canceled);

	return (0);
}
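The wait loop above is also why callers are expected to recheck their predicate in a loop rather than assume a wakeup means the condition holds. The canonical caller-side pattern, runnable with the standard API (illustrative; not from the excerpted source):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;

static void *
waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!ready)			/* always recheck the predicate */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("woke with ready set");
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);

	pthread_mutex_lock(&lock);
	ready = 1;			/* change predicate under the mutex */
	pthread_cond_signal(&cond);	/* or _broadcast for all waiters */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return (0);
}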