Example #1
int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
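
For context, a minimal caller sketch of the non-blocking pattern this function backs through the rw_try_rlock() macro; the lock name foo_lock and the surrounding code are hypothetical, and the lock is assumed to have been set up elsewhere with rw_init():

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock foo_lock;		/* hypothetical; rw_init() elsewhere */

static void
foo_peek(void)
{
	/* Never sleeps: returns 0 immediately if a writer holds the lock. */
	if (rw_try_rlock(&foo_lock)) {
		/* ... read state protected by foo_lock ... */
		rw_runlock(&foo_lock);
	} else {
		/* Write-locked; fall back or retry later. */
	}
}
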
Example #2
int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			curthread->td_locks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
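
Similarly, a hedged usage sketch for the sx_try_slock() front end; bar_lock and the EWOULDBLOCK fallback are illustrative assumptions:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx bar_lock;		/* hypothetical; sx_init() elsewhere */

static int
bar_read(void)
{
	if (!sx_try_slock(&bar_lock))
		return (EWOULDBLOCK);	/* held exclusively; do not sleep */
	/* ... read state protected by bar_lock ... */
	sx_sunlock(&bar_lock);
	return (0);
}
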
Example #3
int
__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_trylock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	KASSERT((opts & MTX_RECURSE) == 0,
	    ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n",
	    m->lock_object.lo_name, file, line));
	if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
		LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
		return (1);
	}
	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
	return (0);
}
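
A minimal sketch of calling this through the mtx_trylock_spin() macro; baz_mtx is hypothetical and assumed to have been initialized as a spin mutex (MTX_SPIN):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx baz_mtx;	/* hypothetical; mtx_init(..., MTX_SPIN) elsewhere */

static void
baz_poll(void)
{
	/* A failed try returns 0 at once instead of spinning. */
	if (mtx_trylock_spin(&baz_mtx)) {
		/* ... touch state protected by baz_mtx ... */
		mtx_unlock_spin(&baz_mtx);
	}
}
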
Example #4
int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
Example #5
int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
Example #6
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
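
The trylock path above is reached through the rm_try_rlock() macro, which supplies trylock = 1. A sketch under the assumption of a hypothetical qux_lock initialized elsewhere with rm_init():

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock qux_lock;		/* hypothetical; rm_init() elsewhere */

static void
qux_read(void)
{
	struct rm_priotracker tracker;	/* per-acquisition, caller-supplied */

	if (rm_try_rlock(&qux_lock, &tracker)) {
		/* ... read-mostly critical section ... */
		rm_runlock(&qux_lock, &tracker);
	}
}
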
Example #7
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
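
A sketch of the classic read-then-maybe-upgrade pattern this enables; cache_lock and the restart path are hypothetical:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx cache_lock;		/* hypothetical; sx_init() elsewhere */

static void
cache_update(void)
{
	sx_slock(&cache_lock);
	/* ... decide an update is needed ... */
	if (sx_try_upgrade(&cache_lock)) {
		/* Sole shared holder; now exclusive. */
		sx_xunlock(&cache_lock);
	} else {
		/* Other shared holders; drop and relock exclusively. */
		sx_sunlock(&cache_lock);
		sx_xlock(&cache_lock);
		/* ... revalidate: state may have changed ... */
		sx_xunlock(&cache_lock);
	}
}
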
Example #8
int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}
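
A hedged caller sketch via the rw_try_wlock() macro; tbl_lock is hypothetical. Note from the code above that on a LO_RECURSABLE rwlock already write-held by curthread the try succeeds by bumping rw_recurse:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock tbl_lock;		/* hypothetical; rw_init() elsewhere */

static void
tbl_insert_opportunistic(void)
{
	if (rw_try_wlock(&tbl_lock)) {
		/* ... modify the table ... */
		rw_wunlock(&tbl_lock);
	}
	/* else: contended; the caller defers the work. */
}
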
Example #9
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!sx_recursed(sx))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		curthread->td_locks++;
	}

	return (rval);
}
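
And the exclusive counterpart via the sx_try_xlock() macro; cfg_lock is a hypothetical name:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx cfg_lock;		/* hypothetical; sx_init() elsewhere */

static void
cfg_flush_if_idle(void)
{
	if (sx_try_xlock(&cfg_lock)) {
		/* ... write configuration ... */
		sx_xunlock(&cfg_lock);
	}
}
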
Example #10
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}

	return (rval);
}
Example #11
int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}
Example #12
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
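
mtx_trylock() and mtx_trylock_flags() funnel into this function. A usage sketch with a hypothetical q_mtx; per the loop above, passing MTX_RECURSE in opts permits recursion even when the mutex was not created LO_RECURSABLE:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx q_mtx;	/* hypothetical; mtx_init(..., MTX_DEF) elsewhere */

static void
q_drain_if_idle(void)
{
	if (mtx_trylock(&q_mtx)) {
		/* ... drain the queue ... */
		mtx_unlock(&q_mtx);
	}
}
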
Example #13
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}
Example #14
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}
Example #15
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}
Example #16
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #17
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #18
/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
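
Finally, a sketch of the lookup-then-promote pattern served by the rw_try_upgrade() macro; map_lock and the revalidation step are hypothetical:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock map_lock;		/* hypothetical; rw_init() elsewhere */

static void
map_promote(void)
{
	rw_rlock(&map_lock);
	/* ... find an entry that needs modification ... */
	if (rw_try_upgrade(&map_lock)) {
		/* Sole reader won the race; now exclusive. */
		rw_wunlock(&map_lock);
	} else {
		/* Other readers present; drop and relock exclusively. */
		rw_runlock(&map_lock);
		rw_wlock(&map_lock);
		/* ... revalidate: the entry may have changed ... */
		rw_wunlock(&map_lock);
	}
}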