Example #1
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}
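
For context, here is a minimal caller-side sketch of the sx(9) API that this function backs (the lock and counter names are hypothetical; sx_xunlock() is the macro front end that expands to _sx_xunlock() with file/line information):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_sx;		/* hypothetical lock */
static int example_count;		/* data protected by example_sx */

static void
example_init(void)
{

	sx_init(&example_sx, "example sx");
}

static void
example_update(void)
{

	sx_xlock(&example_sx);		/* exclusive acquire: sx_cnt becomes -1 */
	example_count++;		/* safe: we hold the xlock */
	sx_xunlock(&example_sx);	/* releases via _sx_xunlock() */
}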
Example #2
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}
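
The matching reader side, as a minimal sketch reusing the hypothetical example_sx above: any number of threads may hold the shared lock concurrently (sx_cnt counts them), and the last one out wakes a queued exclusive waiter via the path in _sx_sunlock():

static int
example_read(void)
{
	int v;

	sx_slock(&example_sx);		/* shared acquire: sx_cnt > 0 */
	v = example_count;		/* read-only access */
	sx_sunlock(&example_sx);	/* last reader may signal sx_excl_cv */
	return (v);
}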
Example #3
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	curthread->td_locks--;
}
Example #4
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
	__sx_sunlock(sx, file, line);
	LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_SUNLOCK_RELEASE, sx);
}
Example #5
void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	TD_LOCKS_DEC(curthread);
}
Example #6
void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	TD_LOCKS_DEC(curthread);
}
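
For reference, a minimal sketch of the rm(9) calling convention that these debug wrappers check (lock and data names are illustrative): readers supply a struct rm_priotracker at acquire time, and the same tracker is handed back to rm_runlock():

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock example_rm;	/* hypothetical read-mostly lock */
static int example_val;			/* data it protects */

static void
example_rm_init(void)
{

	rm_init(&example_rm, "example rm");
}

static void
example_rm_write(int v)
{

	rm_wlock(&example_rm);		/* exclusive; pairs with rm_wunlock() */
	example_val = v;
	rm_wunlock(&example_rm);
}

static int
example_rm_read(void)
{
	struct rm_priotracker tracker;	/* per-acquire reader state */
	int v;

	rm_rlock(&example_rm, &tracker);
	v = example_val;
	rm_runlock(&example_rm, &tracker);
	return (v);
}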
Example #7
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
	    line);
	if (!sx_recursed(sx))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_SX_XUNLOCK_RELEASE, sx);
	__sx_xunlock(sx, curthread, file, line);
}
Example #8
void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	_rel_spin_lock(m);
}
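
A minimal caller-side sketch for the spin-mutex path (assuming the standard mutex(9) API; the mutex name is illustrative): the lock must be initialized with MTX_SPIN, which is exactly what the lock-class KASSERT above enforces:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin;		/* hypothetical spin mutex */

static void
example_spin_init(void)
{

	mtx_init(&example_spin, "example spin", NULL, MTX_SPIN);
}

static void
example_spin_touch(void)
{

	mtx_lock_spin(&example_spin);	/* spins; disables interrupts on this CPU */
	/* ... short, non-sleeping critical section ... */
	mtx_unlock_spin(&example_spin);	/* releases via _rel_spin_lock() */
}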
Example #9
void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_WUNLOCK_RELEASE, rw);
	__rw_wunlock(rw, curthread, file, line);
}
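
For context, a minimal writer-side sketch of the rw(9) API (lock and data names are illustrative); rw_wlock()/rw_wunlock() are the macro front ends that supply the file/line arguments checked here:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock example_rw;	/* hypothetical rwlock */
static int example_stat;		/* data it protects */

static void
example_rw_init(void)
{

	rw_init(&example_rw, "example rw");
}

static void
example_rw_write(int v)
{

	rw_wlock(&example_rw);		/* exclusive acquire */
	example_stat = v;
	rw_wunlock(&example_rw);	/* reaches _rw_wunlock() above */
}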
Example #10
void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{
	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	curthread->td_locks--;
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_UNLOCK_RELEASE, m);
	_rel_sleep_lock(m, curthread, opts, file, line);
}
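
And the corresponding sleep-mutex usage, as a minimal sketch with the same headers as the spin-mutex sketch above (mutex name illustrative): MTX_DEF mutexes satisfy the lock_class_mtx_sleep assertion here, and a contested mtx_lock() blocks the thread on a turnstile rather than spinning:

static struct mtx example_mtx;		/* hypothetical sleep mutex */

static void
example_mtx_init(void)
{

	mtx_init(&example_mtx, "example mtx", NULL, MTX_DEF);
}

static void
example_mtx_touch(void)
{

	mtx_lock(&example_mtx);		/* blocks (not spins) if contested */
	/* ... critical section ... */
	mtx_unlock(&example_mtx);	/* releases via _mtx_unlock_flags() */
}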
Example #11
void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	curthread->td_locks--;
}
Example #12
void
__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_unlock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock_spin(m);
}
Example #13
void
__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	mtx_assert(m, MA_OWNED);

	__mtx_unlock(m, curthread, opts, file, line);
	TD_LOCKS_DEC(curthread);
}
Example #14
void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs, letting the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}
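
The common path above is a classic compare-and-swap retry loop on a single lock word. A stripped-down user-space analogue in C11 atomics (illustrative only: it collapses the waiter bits to one flag, stubs out the turnstile hand-off, and ignores the write-spinner state) shows the same shape, including the special case for the last reader:

#include <stdatomic.h>

#define WAITERS		0x1UL		/* analogue of RW_LOCK_WAITERS */
#define ONE_READER	0x2UL		/* reader count lives above the flag */

static _Atomic unsigned long lock_word;	/* 0 == unlocked, no waiters */

static void
wake_writers(void)
{
	/* Stub: the kernel uses turnstile_broadcast()/turnstile_unpend(). */
}

static void
read_unlock(void)
{
	unsigned long x, newv;

	for (;;) {
		x = atomic_load_explicit(&lock_word, memory_order_relaxed);
		if ((x >> 1) > 1 || !(x & WAITERS)) {
			/* Not the last reader, or nobody queued:
			 * just drop one reader and we are done. */
			newv = x - ONE_READER;
		} else {
			/* Last reader with waiters queued: clear the
			 * word, then take the slow wake-up path. */
			newv = 0;
		}
		if (atomic_compare_exchange_weak_explicit(&lock_word, &x,
		    newv, memory_order_release, memory_order_relaxed)) {
			if (newv == 0 && (x & WAITERS))
				wake_writers();
			break;
		}
		/* CAS failed: another thread changed the word; retry. */
	}
}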