Example #1
/*
 * Adaptively acquire a plain (non-priority) umutex: spin briefly in
 * userspace hoping the owner releases it, then fall back to blocking
 * in the kernel.  Returns 0 on success or an error from the kernel
 * lock path.
 */
int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
	uint32_t cur;

	/* Spinning cannot help on a uniprocessor; just block. */
	if (!_thr_is_smp)
		return __thr_umutex_lock(mtx, id);

	/* Priority-protect/inherit mutexes are handled by the kernel. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);

	for (;;) {
		int spins;

		for (spins = SPINLOOPS; spins > 0; spins--) {
			cur = mtx->m_owner;
			/* Free (possibly contested): try to claim it. */
			if ((cur & ~UMUTEX_CONTESTED) == 0 &&
			    atomic_cmpset_acq_32(&mtx->m_owner, cur,
			    id | cur))
				return (0);
			CPU_SPINWAIT;
		}

		/* Spin budget exhausted; wait in the kernel. */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
	}
}
Example #2
/*
 * Acquire a umutex without spinning.  For plain mutexes, sleep in the
 * kernel and retry the userspace compare-and-set on each wakeup; for
 * priority-protect/inherit mutexes, let the kernel take the lock.
 * Returns 0 on success or an error from the kernel lock path.
 */
int
__thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
	uint32_t cur;

	/* Priority-aware mutexes are locked entirely in the kernel. */
	if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
		return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);

	for (;;) {
		/* Block in the kernel until the mutex may be free. */
		_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);

		cur = mtx->m_owner;
		/* Unowned (possibly contested): try to claim it. */
		if ((cur & ~UMUTEX_CONTESTED) == 0 &&
		    atomic_cmpset_acq_32(&mtx->m_owner, cur, id | cur))
			return (0);
	}
}
Example #3
/*
 * Wait on a userspace (process-private) condition variable.
 *
 * Releases mutex mp, sleeps on cvp's sleepqueue until woken, cancelled,
 * or the optional absolute timeout abstime expires, then re-acquires mp.
 * If cancel is nonzero this acts as a cancellation point.
 *
 * Returns 0 on a normal wakeup, ETIMEDOUT on timeout; exits the thread
 * via _pthread_exit() if cancelled.
 */
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
               const struct timespec *abstime, int cancel)
{
    struct pthread	*curthread = _get_curthread();
    struct sleepqueue *sq;
    int	recurse;   /* saved mutex recursion count, restored on re-lock */
    int	error;
    int	defered;   /* [sic] nonzero if mutex unlock deferred its wakeup */

    if (curthread->wchan != NULL)
        PANIC("thread was already on queue.");

    /* Honor a pending cancellation request before blocking. */
    if (cancel)
        _thr_testcancel(curthread);

    _sleepq_lock(cvp);
    /*
     * set __has_user_waiters before unlocking mutex, this allows
     * us to check it without locking in pthread_cond_signal().
     */
    cvp->__has_user_waiters = 1;
    defered = 0;
    /*
     * Drop the mutex while holding the sleepqueue lock so no wakeup
     * can slip in between unlock and enqueue.  recurse/defered are
     * filled in by the unlock.
     */
    (void)_mutex_cv_unlock(mp, &recurse, &defered);
    curthread->mutex_obj = mp;
    _sleepq_add(cvp, curthread);
    for(;;) {
        /* Arm the per-thread wake flag before releasing the queue lock. */
        _thr_clear_wake(curthread);
        _sleepq_unlock(cvp);
        /*
         * The mutex unlock deferred waking its waiters; issue the
         * kernel wakeup now that we no longer hold the queue lock.
         * Skipped if someone already marked the mutex contested.
         */
        if (defered) {
            defered = 0;
            if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
                (void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
                                   mp->m_lock.m_flags, 0, 0);
        }
        /* Flush any wakeups this thread itself deferred earlier. */
        if (curthread->nwaiter_defer > 0) {
            _thr_wake_all(curthread->defer_waiters,
                          curthread->nwaiter_defer);
            curthread->nwaiter_defer = 0;
        }

        /* Sleep; bracket with cancel-enter/leave if cancellable. */
        if (cancel) {
            _thr_cancel_enter2(curthread, 0);
            error = _thr_sleep(curthread, cvp->__clock_id, abstime);
            _thr_cancel_leave(curthread, 0);
        } else {
            error = _thr_sleep(curthread, cvp->__clock_id, abstime);
        }

        _sleepq_lock(cvp);
        if (curthread->wchan == NULL) {
            /* A signaler removed us from the queue: genuine wakeup. */
            error = 0;
            break;
        } else if (cancel && SHOULD_CANCEL(curthread)) {
            /* Cancelled: dequeue ourselves, re-take the mutex, exit. */
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters =
                _sleepq_remove(sq, curthread);
            _sleepq_unlock(cvp);
            curthread->mutex_obj = NULL;
            _mutex_cv_lock(mp, recurse);
            if (!THR_IN_CRITICAL(curthread))
                _pthread_exit(PTHREAD_CANCELED);
            else /* this should not happen */
                return (0);
        } else if (error == ETIMEDOUT) {
            /* Timed out: dequeue ourselves and report ETIMEDOUT. */
            sq = _sleepq_lookup(cvp);
            cvp->__has_user_waiters =
                _sleepq_remove(sq, curthread);
            break;
        }
        /* Spurious wakeup: still queued, loop and sleep again. */
    }
    _sleepq_unlock(cvp);
    curthread->mutex_obj = NULL;
    /* Re-acquire the mutex with its saved recursion count. */
    _mutex_cv_lock(mp, recurse);
    return (error);
}