Example #1
int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));

	if (sx_xlocked(sx) &&
	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		sx->sx_recurse++;
		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
		    (uintptr_t)curthread);
	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!sx_recursed(sx))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		curthread->td_locks++;
	}

	return (rval);
}
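A minimal usage sketch for the try-lock pattern above, assuming a FreeBSD kernel-module context; foo_sx, foo_try_update(), and the EBUSY fallback are illustrative choices, not part of the source.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_sx;
SX_SYSINIT(foo_sx, &foo_sx, "foo sx");

static int
foo_try_update(void)
{
	/* sx_try_xlock() expands to sx_try_xlock_() with file/line info. */
	if (!sx_try_xlock(&foo_sx))
		return (EBUSY);		/* contended; let the caller retry */
	/* ... exclusive-section work ... */
	sx_xunlock(&foo_sx);
	return (0);
}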
Example #2
int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
	}
	return (rval);
}
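The same opportunistic pattern with an rwlock; a sketch under the same kernel-module assumption, with foo_rw and the flush routine as invented names.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock foo_rw;
RW_SYSINIT(foo_rw, &foo_rw, "foo rw");

static void
foo_flush_if_idle(void)
{
	/* Take the write lock only if nobody holds the lock at all. */
	if (rw_try_wlock(&foo_rw)) {
		/* ... writer-only work ... */
		rw_wunlock(&foo_rw);
	}
}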
Example #3
int
sx_try_slock_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));

	for (;;) {
		x = sx->sx_lock;
		KASSERT(x != SX_LOCK_DESTROYED,
		    ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
		if (!(x & SX_LOCK_SHARED))
			break;
		if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) {
			LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
			WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_READER);
			curthread->td_locks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}
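On the shared side, the CAS loop above bumps the reader count in the lock word. A hedged sketch of a caller, with foo_sx and foo_value as illustrative state:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_sx;
static int foo_value;

static int
foo_peek(int *out)
{
	if (!sx_try_slock(&foo_sx))
		return (0);	/* an exclusive owner is active */
	*out = foo_value;	/* read shared state under the slock */
	sx_sunlock(&foo_sx);
	return (1);
}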
Example #4
int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			curthread->td_locks++;
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}
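A matching reader-side sketch for the rwlock variant, again with invented names (foo_rw, foo_stat) under a kernel-module assumption:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock foo_rw;
static int foo_stat;

static int
foo_read_stat(int *out)
{
	if (!rw_try_rlock(&foo_rw))
		return (0);	/* write-locked; caller falls back */
	*out = foo_stat;
	rw_runlock(&foo_rw);
	return (1);
}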
Example #5
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	__mtx_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
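Since the comment notes these function versions back the macros used by modules, a plain blocking acquire from module code looks like the sketch below; the MTX_SYSINIT name and the update routine are illustrative:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx foo_mtx;
MTX_SYSINIT(foo_mtx, &foo_mtx, "foo mutex", MTX_DEF);

static void
foo_update(void)
{
	mtx_lock(&foo_mtx);	/* lands in __mtx_lock_flags() from a module */
	/* ... critical section ... */
	mtx_unlock(&foo_mtx);
}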
Example #6
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, v;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
	if (SCHEDULER_STOPPED_TD(td))
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	rval = 1;
	recursed = false;
	v = MTX_UNOWNED;
	for (;;) {
		if (_mtx_obtain_lock_fetch(m, &v, tid))
			break;
		if (v == MTX_UNOWNED)
			continue;
		if (v == tid &&
		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0)) {
			m->mtx_recurse++;
			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
			recursed = true;
			break;
		}
		rval = 0;
		break;
	}

	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (!recursed)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);
	}

	return (rval);
}
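The v == tid branch above is what lets a recursable trylock succeed for its current owner. A sketch assuming MTX_RECURSE at init time, with invented names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx foo_mtx;
MTX_SYSINIT(foo_mtx, &foo_mtx, "foo recursable", MTX_DEF | MTX_RECURSE);

static int
foo_poll(void)
{
	if (!mtx_trylock(&foo_mtx))
		return (EWOULDBLOCK);
	/* The owner may mtx_trylock() again here; it recurses, rval = 1. */
	mtx_unlock(&foo_mtx);
	return (0);
}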
Example #7
/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid, v;

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));
	WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
	    LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);

	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_sleep(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
		    m, 0, 0, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
	    file, line);
	TD_LOCKS_INC(curthread);
}
Example #8
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}
Example #9
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
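Read-mostly locks pair each read acquire with a caller-supplied per-CPU tracker, which is the tracker argument threaded through the debug function above. A usage sketch under the same kernel assumption; RM_SYSINIT and foo_cfg are illustrative:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock foo_rm;
RM_SYSINIT(foo_rm, &foo_rm, "foo rm");
static int foo_cfg;

static int
foo_get_cfg(void)
{
	struct rm_priotracker tracker;
	int v;

	rm_rlock(&foo_rm, &tracker);	/* cheap per-CPU read path */
	v = foo_cfg;
	rm_runlock(&foo_rm, &tracker);
	return (v);
}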
Example #10
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #11
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;
	KASSERT(!TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}
Example #12
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
	int error = 0;

	if (SCHEDULER_STOPPED())
		return (0);
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);
	error = __sx_slock(sx, opts, file, line);
	if (!error) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		curthread->td_locks++;
	}

	return (error);
}
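Because __sx_slock() can fail when SX_INTERRUPTIBLE is among the opts, callers of the signal-interruptible variant must check the return value. A sketch using the stock sx_slock_sig() wrapper; foo_sx is an invented name:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx foo_sx;

static int
foo_wait_shared(void)
{
	int error;

	error = sx_slock_sig(&foo_sx);	/* sleep may be broken by a signal */
	if (error != 0)
		return (error);		/* typically EINTR or ERESTART */
	/* ... shared-section work ... */
	sx_sunlock(&foo_sx);
	return (0);
}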
Example #13
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}
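To see the fast path of __rw_rlock() in isolation: the lock word encodes shared mode in a flag bit and the reader count above it, so a read acquire is a single compare-and-swap that adds one reader while preserving the flag bits. Below is a minimal userspace C11 analogue of that loop; every name (LOCK_READ, ONE_READER, try_read_acquire) is invented for illustration and none of this is FreeBSD KPI:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	LOCK_READ	((uintptr_t)0x01)	/* lock word is in shared mode */
#define	ONE_READER	((uintptr_t)0x10)	/* increment for one reader */

/* Unlocked state is "shared mode, zero readers", as in the rw encoding. */
static _Atomic uintptr_t lock_word = LOCK_READ;

static bool
try_read_acquire(void)
{
	uintptr_t v = atomic_load_explicit(&lock_word, memory_order_relaxed);

	/* Mirror of the kernel loop: retry only when the CAS itself fails. */
	while (v & LOCK_READ) {
		if (atomic_compare_exchange_weak_explicit(&lock_word, &v,
		    v + ONE_READER, memory_order_acquire,
		    memory_order_relaxed))
			return (true);	/* reader count bumped atomically */
	}
	return (false);		/* a writer owns the word; slow path needed */
}

The kernel version layers blocking on top of this: when the CAS path cannot succeed, it spins adaptively on a running owner and otherwise parks on a turnstile, which is everything after the "hard case" comment above.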