Example #1
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}
Example #2
void
mutex_destroy(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) {
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_SPIN(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_ADAPTIVE(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		if (MUTEX_OWNER(lp) != curthread)
			mutex_panic("mutex_destroy: not owner", lp);
		if (MUTEX_HAS_WAITERS(lp)) {
			turnstile_t *ts = turnstile_lookup(lp);
			turnstile_exit(lp);
			if (ts != NULL)
				mutex_panic("mutex_destroy: has waiters", lp);
		}
		MUTEX_DESTROY(lp);
	} else {
		mutex_panic("mutex_destroy: bad mutex", lp);
	}
}
Example #3
/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up write
	 * waiters if we have any over readers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * writers but leave the RW_LOCK_READ_WAITERS flag set.  If a
	 * new reader comes in before a writer it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}
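
The branch at the end of _rw_wunlock_hard() is easiest to see in isolation: when write waiters exist the exclusive queue is chosen and the read-waiters bit is carried over, otherwise the shared queue is woken and the word goes back to RW_UNLOCKED. The stand-alone sketch below models only that computation; the TOY_* constants and names are invented for illustration and do not correspond to FreeBSD's RW_LOCK_* definitions.

/*
 * Stand-alone model of the wake-policy computation in _rw_wunlock_hard().
 * The TOY_* values are made up; only the selection logic mirrors the
 * kernel code above.
 */
#include <assert.h>
#include <stdint.h>

#define	TOY_UNLOCKED		0x0u
#define	TOY_READ_WAITERS	0x1u
#define	TOY_WRITE_WAITERS	0x2u

enum toy_queue { TOY_SHARED_QUEUE, TOY_EXCLUSIVE_QUEUE };

/*
 * Return the word to store back into the lock and select the queue to
 * wake: the exclusive queue gets precedence when write waiters exist,
 * and the read-waiters bit is preserved for the queue left sleeping.
 */
static uint32_t
toy_wunlock_word(uint32_t lock, enum toy_queue *queuep)
{
	uint32_t v = TOY_UNLOCKED;

	if (lock & TOY_WRITE_WAITERS) {
		*queuep = TOY_EXCLUSIVE_QUEUE;
		v |= (lock & TOY_READ_WAITERS);
	} else
		*queuep = TOY_SHARED_QUEUE;
	return (v);
}

int
main(void)
{
	enum toy_queue q;

	/* Both queues populated: wake the writers, remember the readers. */
	assert(toy_wunlock_word(TOY_READ_WAITERS | TOY_WRITE_WAITERS, &q) ==
	    TOY_READ_WAITERS && q == TOY_EXCLUSIVE_QUEUE);

	/* Only readers waiting: the word goes back to fully unlocked. */
	assert(toy_wunlock_word(TOY_READ_WAITERS, &q) == TOY_UNLOCKED &&
	    q == TOY_SHARED_QUEUE);
	return (0);
}
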
Example #4
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
Example #5
/*
 * mutex_vector_exit() is called from mutex_exit() if the lock is not
 * adaptive, has waiters, or is not owned by the current thread (panic).
 */
void
mutex_vector_exit(mutex_impl_t *lp)
{
	turnstile_t *ts;

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_clear_splx(&lp->m_spin.m_spinlock, lp->m_spin.m_oldspl);
		return;
	}

	if (MUTEX_OWNER(lp) != curthread) {
		mutex_panic("mutex_exit: not owner", lp);
		return;
	}

	ts = turnstile_lookup(lp);
	MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	if (ts == NULL)
		turnstile_exit(lp);
	else
		turnstile_wakeup(ts, TS_WRITER_Q, ts->ts_waiters, NULL);
	LOCKSTAT_RECORD0(LS_MUTEX_EXIT_RELEASE, lp);
}
Example #6
/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}
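
The shape of the release path above — look up the sleep queue under its interlock, clear the lock word, then either just drop the interlock or wake the sleepers — can be modelled in user space. The sketch below is such a toy model built on pthreads and C11 atomics; every toy_* name, the bit layout, and the counter-based waiters check are assumptions made for illustration, not NetBSD's implementation.

/*
 * Toy user-space model of the release protocol above: take the sleep-queue
 * interlock (the role turnstile_lookup() plays), clear the lock word, then
 * wake sleepers only if any are recorded.
 */
#include <pthread.h>
#include <stdatomic.h>

#define	TOY_LOCKED	0x1u

struct toy_mutex {
	atomic_uint	word;		/* TOY_LOCKED plus a waiters bit */
	unsigned	nwaiters;	/* protected by chain_lock */
	pthread_mutex_t	chain_lock;	/* stands in for the turnstile chain lock */
	pthread_cond_t	sleepq;		/* stands in for the turnstile sleep queue */
};

static void
toy_mutex_exit(struct toy_mutex *mtx)
{
	/* turnstile_lookup(): interlock the sleep queue first. */
	pthread_mutex_lock(&mtx->chain_lock);

	/*
	 * Clear the lock word while holding the interlock, as MUTEX_RELEASE()
	 * does above once the turnstile has been looked up.
	 */
	atomic_store(&mtx->word, 0);

	if (mtx->nwaiters == 0) {
		/* No sleepers: just drop the interlock (turnstile_exit()). */
		pthread_mutex_unlock(&mtx->chain_lock);
		return;
	}

	/* Sleepers recorded: hand the wakeup off (turnstile_wakeup()). */
	pthread_cond_broadcast(&mtx->sleepq);
	pthread_mutex_unlock(&mtx->chain_lock);
}

int
main(void)
{
	struct toy_mutex mtx = {
		.word = TOY_LOCKED,
		.nwaiters = 0,
		.chain_lock = PTHREAD_MUTEX_INITIALIZER,
		.sleepq = PTHREAD_COND_INITIALIZER,
	};

	toy_mutex_exit(&mtx);	/* no waiters: takes the turnstile_exit() path */
	return (0);
}
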
Example #7
/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin testing the lock word and do exponential backoff
		 * to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
				SPINLOCK_BACKOFF(count); 
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *		..		    acquire cache line
		 *		..                   test for waiters
		 *	acquire cache line    <-      lose cache line
		 *	 lock cache line	           ..
		 *     verify mutex is held                ..
		 *	    set waiters  	           ..
		 *	 unlock cache line		   ..
		 *	  lose cache line     ->    acquire cache line
		 *		..	          clear lock word, waiters 
		 *	  return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case interrupted
		 *   or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 * 
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it. 
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go asleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}
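
The long MULTIPROCESSOR comment above boils down to one rule: after publishing the waiters bit, re-read the lock state and only sleep if the owner is off-CPU and the bit survived. The sketch below models just that decision with C11 atomics in user space; the toy_* names, the owner encoding, and the stubbed on-CPU check are assumptions for illustration, not NetBSD's code.

/*
 * Minimal user-space model of the "set the waiters bit, then re-check
 * before sleeping" step discussed above.  The layout (a fake owner value
 * plus a TOY_WAITERS flag) is invented; it is not NetBSD's mtx_owner
 * encoding.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	TOY_WAITERS	((uintptr_t)0x1)

struct toy_adaptive {
	_Atomic uintptr_t owner;	/* fake owner value | TOY_WAITERS */
};

/* Stub for illustration: in the kernel this would scan the CPUs. */
static bool
toy_owner_oncpu(uintptr_t owner)
{
	(void)owner;
	return false;
}

/*
 * Return true if it is safe to block: the waiters bit was set, is still
 * visible after re-reading the lock word, and the owner is not running.
 * Return false to tell the caller to spin again, mirroring the
 * turnstile_exit()-and-retry path above.
 */
static bool
toy_can_block(struct toy_adaptive *mtx, uintptr_t owner)
{
	uintptr_t cur;

	/* Publish the waiters bit against the owner value we saw. */
	if (!atomic_compare_exchange_strong(&mtx->owner, &owner,
	    owner | TOY_WAITERS))
		return false;		/* lock word changed: retry */

	/*
	 * The seq_cst load stands in for the kernel's membar_consumer()
	 * followed by plain reads of the owner and the waiters bit.
	 */
	cur = atomic_load(&mtx->owner);
	if (toy_owner_oncpu(cur & ~TOY_WAITERS))
		return false;		/* owner running again: keep spinning */
	if ((cur & TOY_WAITERS) == 0)
		return false;		/* a release raced and cleared the bit */
	return true;
}

int
main(void)
{
	struct toy_adaptive mtx = { .owner = (uintptr_t)0x1000 };

	/* Fake owner 0x1000 is "not on a CPU", so blocking is allowed. */
	return toy_can_block(&mtx, (uintptr_t)0x1000) ? 0 : 1;
}
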
Example #8
/*
 * rw_vector_enter:
 *
 *	Acquire a rwlock.
 */
void
rw_vector_enter(krwlock_t *rw, const krw_t op)
{
	uintptr_t owner, incr, need_wait, set_wait, curthread, next;
	turnstile_t *ts;
	int queue;
	lwp_t *l;
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_TIMER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_FLAG(lsflag);

	l = curlwp;
	curthread = (uintptr_t)l;

	RW_ASSERT(rw, !cpu_intr_p());
	RW_ASSERT(rw, curthread != 0);
	RW_WANTLOCK(rw, op);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	/*
	 * We play a slight trick here.  If we're a reader, we want to
	 * increment the read count.  If we're a writer, we want to
	 * set the owner field and the WRITE_LOCKED bit.
	 *
	 * In the latter case, we expect those bits to be zero,
	 * therefore we can use an add operation to set them, which
	 * means an add operation for both cases.
	 */
	if (__predict_true(op == RW_READER)) {
		incr = RW_READ_INCR;
		set_wait = RW_HAS_WAITERS;
		need_wait = RW_WRITE_LOCKED | RW_WRITE_WANTED;
		queue = TS_READER_Q;
	} else {
		RW_DASSERT(rw, op == RW_WRITER);
		incr = curthread | RW_WRITE_LOCKED;
		set_wait = RW_HAS_WAITERS | RW_WRITE_WANTED;
		need_wait = RW_WRITE_LOCKED | RW_THREAD;
		queue = TS_WRITER_Q;
	}

	LOCKSTAT_ENTER(lsflag);

	KPREEMPT_DISABLE(curlwp);
	for (owner = rw->rw_owner; ;) {
		/*
		 * Read the lock owner field.  If the need-to-wait
		 * indicator is clear, then try to acquire the lock.
		 */
		if ((owner & need_wait) == 0) {
			next = rw_cas(rw, owner, (owner + incr) &
			    ~RW_WRITE_WANTED);
			if (__predict_true(next == owner)) {
				/* Got it! */
				membar_enter();
				break;
			}

			/*
			 * Didn't get it -- spin around again (we'll
			 * probably sleep on the next iteration).
			 */
			owner = next;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			kpreempt_enable();
			return;
		}
		if (__predict_false(RW_OWNER(rw) == curthread)) {
			rw_abort(rw, __func__, "locking against myself");
		}
		/*
		 * If the lock owner is running on another CPU, and
		 * there are no existing waiters, then spin.
		 */
		if (rw_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			u_int count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = rw->rw_owner;
			} while (rw_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if ((owner & need_wait) == 0)
				continue;
		}

		/*
		 * Grab the turnstile chain lock.  Once we have that, we
		 * can adjust the waiter bits and sleep queue.
		 */
		ts = turnstile_lookup(rw);

		/*
		 * Mark the rwlock as having waiters.  If the set fails,
		 * then we may not need to sleep and should spin again.
		 * Reload rw_owner because turnstile_lookup() may have
		 * spun on the turnstile chain lock.
		 */
		owner = rw->rw_owner;
		if ((owner & need_wait) == 0 || rw_oncpu(owner)) {
			turnstile_exit(rw);
			continue;
		}
		next = rw_cas(rw, owner, owner | set_wait);
		if (__predict_false(next != owner)) {
			turnstile_exit(rw);
			owner = next;
			continue;
		}

		LOCKSTAT_START_TIMER(lsflag, slptime);
		turnstile_block(ts, queue, rw, &rw_syncobj);
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		/*
		 * No need for a memory barrier because of context switch.
		 * If not handed the lock, then spin again.
		 */
		if (op == RW_READER || (rw->rw_owner & RW_THREAD) == curthread)
			break;

		owner = rw->rw_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK |
	    (op == RW_WRITER ? LB_SLEEP1 : LB_SLEEP2), slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, rw, LB_RWLOCK | LB_SPIN, spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	RW_DASSERT(rw, (op != RW_READER && RW_OWNER(rw) == curthread) ||
	    (op == RW_READER && RW_COUNT(rw) != 0));
	RW_LOCKED(rw, op);
}
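
The "slight trick" described above — one compare-and-swap of owner + incr serves both the reader and the writer case because the bits a writer sets are known to be zero — can be shown with a small user-space model. In the sketch below the TOY_* bit layout is invented, and the RW_WRITE_WANTED/RW_HAS_WAITERS handling of the real slow path is omitted; only the fast-path acquisition is modelled.

/*
 * User-space sketch of the one-CAS-loop-for-both-cases trick above.
 * A reader count lives in the upper field; a writer stores its id there
 * and sets TOY_WRITE_LOCKED.  The values are illustrative only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	TOY_WRITE_LOCKED	((uintptr_t)0x1)
#define	TOY_READ_INCR		((uintptr_t)0x10)	/* one reader */
#define	TOY_THREAD_MASK		(~(uintptr_t)0xf)	/* owner / count field */

struct toy_rwlock {
	_Atomic uintptr_t owner;
};

/*
 * Try once to acquire as a reader (writer_id == 0) or writer (writer_id
 * != 0).  Returns true on success; on failure the caller would fall into
 * the slow path that sets waiter bits and sleeps, as rw_vector_enter()
 * does above.
 */
static bool
toy_rw_tryenter(struct toy_rwlock *rw, uintptr_t writer_id)
{
	uintptr_t incr, need_wait, owner;

	if (writer_id == 0) {				/* reader */
		incr = TOY_READ_INCR;
		need_wait = TOY_WRITE_LOCKED;
	} else {					/* writer */
		incr = writer_id | TOY_WRITE_LOCKED;
		need_wait = TOY_WRITE_LOCKED | TOY_THREAD_MASK;
	}

	owner = atomic_load(&rw->owner);
	if ((owner & need_wait) != 0)
		return false;		/* somebody we would have to wait for */

	/*
	 * Because the fields being set are known to be zero, the same
	 * owner + incr works for both cases, as the comment above explains.
	 */
	return atomic_compare_exchange_strong(&rw->owner, &owner,
	    owner + incr);
}

int
main(void)
{
	struct toy_rwlock rw = { .owner = 0 };

	if (!toy_rw_tryenter(&rw, 0))		/* first reader: succeeds */
		return 1;
	if (toy_rw_tryenter(&rw, 0x100))	/* writer must fail: reader holds it */
		return 1;
	return 0;
}
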
Example #9
void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}
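
The loop in _rw_runlock_cookie() has two fast paths (drop one of several readers, or release outright when no waiters are recorded) and one slow path that takes the turnstile. The sketch below models only the two fast paths with C11 atomics; the TOY_* constants are invented stand-ins for the FreeBSD bit layout, and the slow path is reduced to a "caller must handle it" return value.

/*
 * Sketch of the fast paths of the read-unlock loop above: drop one reader
 * if others remain, otherwise release the whole word when no waiters are
 * recorded.  The turnstile slow path is left out.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	TOY_ONE_READER		((uintptr_t)0x10)
#define	TOY_READER_MASK		(~(uintptr_t)0xf)
#define	TOY_WAITERS		((uintptr_t)0x1)
#define	TOY_UNLOCKED		((uintptr_t)0)

struct toy_rwlock {
	_Atomic uintptr_t word;
};

/*
 * Returns true when the unlock completed on a fast path; false means the
 * caller holds the last read lock and waiters exist, so it must take the
 * sleep-queue lock and wake them (not modelled here).
 */
static bool
toy_runlock_fast(struct toy_rwlock *rw)
{
	uintptr_t x;

	for (;;) {
		x = atomic_load(&rw->word);

		/* More than one reader: just drop our reference. */
		if ((x & TOY_READER_MASK) > TOY_ONE_READER) {
			if (atomic_compare_exchange_weak(&rw->word, &x,
			    x - TOY_ONE_READER))
				return true;
			continue;	/* raced; reload and retry */
		}

		/* Last reader and nobody queued: release outright. */
		if ((x & TOY_WAITERS) == 0) {
			if (atomic_compare_exchange_weak(&rw->word, &x,
			    TOY_UNLOCKED))
				return true;
			continue;
		}

		/* Last reader with waiters: defer to the slow path. */
		return false;
	}
}

int
main(void)
{
	struct toy_rwlock rw = { .word = 2 * TOY_ONE_READER };

	return toy_runlock_fast(&rw) ? 0 : 1;	/* drops one of two readers */
}
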
Example #10
/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
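
The waiter-flag bookkeeping in __rw_downgrade() is a small pure computation: keep the read-waiters bit only if writers remain queued, and wake the readers only when no writer is pending. The sketch below models exactly that logic with hypothetical TOY_* flag values; it illustrates the code above, not FreeBSD's actual definitions.

/*
 * Stand-alone model of the waiter-flag bookkeeping in __rw_downgrade().
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define	TOY_READ_WAITERS	0x1u
#define	TOY_WRITE_WAITERS	0x2u
#define	TOY_READERS_LOCK_1	0x10u	/* "one reader holds the lock" */

/* Compute the new lock word and whether the shared queue gets woken. */
static uint32_t
toy_downgrade_word(uint32_t waiter_bits, bool *wake_readers)
{
	uint32_t v = waiter_bits;
	bool rwait = (v & TOY_READ_WAITERS) != 0;
	bool wwait = (v & TOY_WRITE_WAITERS) != 0;

	if (!wwait)
		v &= ~TOY_READ_WAITERS;	/* those readers are about to run */
	*wake_readers = rwait && !wwait;
	return (TOY_READERS_LOCK_1 | v);
}

int
main(void)
{
	bool wake;

	/* Readers only: they are woken and the flag is cleared. */
	assert(toy_downgrade_word(TOY_READ_WAITERS, &wake) ==
	    TOY_READERS_LOCK_1 && wake);

	/* Readers and writers: nobody is woken, both flags survive. */
	assert(toy_downgrade_word(TOY_READ_WAITERS | TOY_WRITE_WAITERS,
	    &wake) == (TOY_READERS_LOCK_1 | TOY_READ_WAITERS |
	    TOY_WRITE_WAITERS) && !wake);
	return (0);
}
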
Example #11
/*
 * mutex_vector_enter() is called from the assembly mutex_enter() routine
 * if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
void
mutex_vector_enter(mutex_impl_t *lp)
{
	kthread_id_t	owner;
	hrtime_t	sleep_time = 0;	/* how long we slept */
	uint_t		spin_count = 0;	/* how many times we spun */
	cpu_t 		*cpup, *last_cpu;
	extern cpu_t	*cpu_list;
	turnstile_t	*ts;
	volatile mutex_impl_t *vlp = (volatile mutex_impl_t *)lp;
	int		backoff;	/* current backoff */
	int		backctr;	/* ctr for backoff */
	int		sleep_count = 0;

	ASSERT_STACK_ALIGNED();

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_set_spl(&lp->m_spin.m_spinlock, lp->m_spin.m_minspl,
		    &lp->m_spin.m_oldspl);
		return;
	}

	if (!MUTEX_TYPE_ADAPTIVE(lp)) {
		mutex_panic("mutex_enter: bad mutex", lp);
		return;
	}

	/*
	 * Adaptive mutexes must not be acquired from above LOCK_LEVEL.
	 * We can migrate after loading CPU but before checking CPU_ON_INTR,
	 * so we must verify by disabling preemption and loading CPU again.
	 */
	cpup = CPU;
	if (CPU_ON_INTR(cpup) && !panicstr) {
		kpreempt_disable();
		if (CPU_ON_INTR(CPU))
			mutex_panic("mutex_enter: adaptive at high PIL", lp);
		kpreempt_enable();
	}

	CPU_STATS_ADDQ(cpup, sys, mutex_adenters, 1);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	for (;;) {
spin:
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the mutex data structure.
		 * The spin_count test and call to nulldev are to prevent
		 * the compiler optimizer from eliminating the delay loop.
		 */
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			};    /* delay */
			backoff = backoff << 1;			/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}

			SMT_PAUSE();
		}

		if (panicstr)
			return;

		if ((owner = MUTEX_OWNER(vlp)) == NULL) {
			if (mutex_adaptive_tryenter(lp))
				break;
			continue;
		}

		if (owner == curthread)
			mutex_panic("recursive mutex_enter", lp);

		/*
		 * If lock is held but owner is not yet set, spin.
		 * (Only relevant for platforms that don't have cas.)
		 */
		if (owner == MUTEX_NO_OWNER)
			continue;

		/*
		 * When searching the other CPUs, start with the one where
		 * we last saw the owner thread.  If owner is running, spin.
		 *
		 * We must disable preemption at this point to guarantee
		 * that the list doesn't change while we traverse it
		 * without the cpu_lock mutex.  While preemption is
		 * disabled, we must revalidate our cached cpu pointer.
		 */
		kpreempt_disable();
		if (cpup->cpu_next == NULL)
			cpup = cpu_list;
		last_cpu = cpup;	/* mark end of search */
		do {
			if (cpup->cpu_thread == owner) {
				kpreempt_enable();
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		kpreempt_enable();

		/*
		 * The owner appears not to be running, so block.
		 * See the Big Theory Statement for memory ordering issues.
		 */
		ts = turnstile_lookup(lp);
		MUTEX_SET_WAITERS(lp);
		membar_enter();

		/*
		 * Recheck whether owner is running after waiters bit hits
		 * global visibility (above).  If owner is running, spin.
		 *
		 * Since we are at ipl DISP_LEVEL, kernel preemption is
		 * disabled, however we still need to revalidate our cached
		 * cpu pointer to make sure the cpu hasn't been deleted.
		 */
		if (cpup->cpu_next == NULL)
			last_cpu = cpup = cpu_list;
		do {
			if (cpup->cpu_thread == owner) {
				turnstile_exit(lp);
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		membar_consumer();

		/*
		 * If owner and waiters bit are unchanged, block.
		 */
		if (MUTEX_OWNER(vlp) == owner && MUTEX_HAS_WAITERS(vlp)) {
			sleep_time -= gethrtime();
			(void) turnstile_block(ts, TS_WRITER_Q, lp,
			    &mutex_sobj_ops, NULL, NULL);
			sleep_time += gethrtime();
			sleep_count++;
		} else {
			turnstile_exit(lp);
		}
	}

	ASSERT(MUTEX_OWNER(lp) == curthread);

	if (sleep_time != 0) {
		/*
		 * Note, sleep time is the sum of all the sleeping we
		 * did.
		 */
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_BLOCK, lp, sleep_time);
	}

	/*
	 * We do not count a sleep as a spin.
	 */
	if (spin_count > sleep_count)
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_SPIN, lp,
		    spin_count - sleep_count);

	LOCKSTAT_RECORD0(LS_MUTEX_ENTER_ACQUIRE, lp);
}
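
The spin loop above doubles its delay on every failed pass and clamps it at a cap, so contending CPUs back off the lock word instead of hammering it. The sketch below reproduces that backoff schedule in user space; TOY_BACKOFF_BASE, TOY_BACKOFF_CAP, and the busy-wait loop are illustrative choices, not the kernel's plat_lock_delay() or its tunables.

/*
 * User-space sketch of the exponential backoff used in the spin loop above.
 */
#include <stdint.h>

#define	TOY_BACKOFF_BASE	50
#define	TOY_BACKOFF_CAP		(TOY_BACKOFF_BASE << 8)

/*
 * Spin for roughly *backoff iterations, then double the delay for the
 * next attempt, clamping it at the cap so a long wait does not turn into
 * an unbounded one.
 */
static void
toy_lock_delay(int *backoff)
{
	volatile int spin;	/* volatile so the loop is not optimized away,
				 * the role nulldev() plays above */

	for (spin = *backoff; spin > 0; spin--)
		continue;

	*backoff <<= 1;
	if (*backoff > TOY_BACKOFF_CAP)
		*backoff = TOY_BACKOFF_CAP;
}

int
main(void)
{
	int backoff = TOY_BACKOFF_BASE;
	int tries;

	/* A failed trylock would sit between the delays in the real loop. */
	for (tries = 0; tries < 10; tries++)
		toy_lock_delay(&backoff);
	return (backoff == TOY_BACKOFF_CAP ? 0 : 1);
}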