Example #1
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

	if (wakeup_swapper)
		kick_proc0();
}
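
A hypothetical usage sketch (struct foo and foo_update_and_scan() are illustrative names, not from the source): a writer mutates the data under the exclusive lock and then downgrades, so other readers may proceed while it finishes a read-only pass.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

struct foo {
	struct sx	foo_lock;	/* protects foo_value */
	int		foo_value;
};

static void
foo_update_and_scan(struct foo *fp, int new_value)
{
	sx_xlock(&fp->foo_lock);	/* exclusive: safe to mutate */
	fp->foo_value = new_value;
	sx_downgrade(&fp->foo_lock);	/* keep a shared hold, admit readers */
	/* ... read-only work under the shared lock ... */
	sx_sunlock(&fp->foo_lock);
}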
Example #2
/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starvation of the threads sleeping on the exclusive queue by
	 * giving them precedence and clearing the shared waiters bit anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}
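
For context, a rough sketch of how the 'easy case' might hand off to this hard case (an assumption, not copied from sx.h; sx_xunlock_sketch() is a hypothetical name): one release-ordered compare-and-set from "owned by curthread, no flags" to unlocked, with any failure (recursion, waiter bits) redirected here.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static __inline void
sx_xunlock_sketch(struct sx *sx, const char *file, int line)
{
	uintptr_t tid = (uintptr_t)curthread;

	/*
	 * Fast path: the lock word holds only our thread pointer, so one
	 * release cmpset drops the lock.  Recursion or waiter bits make
	 * the comparison fail and send us to the hard case above.
	 */
	if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		_sx_xunlock_hard(sx, tid, file, line);
}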
Example #3
/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock: new readers are granted the
		 * lock immediately in that case.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * The wake-up semantics here are quite simple: just wake
		 * up all of the exclusive waiters.  Note that the state
		 * of the lock could have changed, so if the cmpset fails,
		 * loop back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all thread on"
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}
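
Similarly, a rough sketch of a shared-unlock fast path that would redirect to this hard case (an assumption, not copied from sx.h; sx_sunlock_sketch() is a hypothetical name): drop one sharer with a release cmpset, and punt to the hard case whenever exclusive waiters are present or the cmpset loses a race.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static __inline void
sx_sunlock_sketch(struct sx *sx, const char *file, int line)
{
	uintptr_t x = sx->sx_lock;

	/*
	 * Fast path: with no exclusive waiters, releasing a shared hold
	 * is just "subtract one sharer".  SX_SHARERS_LOCK(1) minus
	 * SX_ONE_SHARER is SX_LOCK_UNLOCKED, so this also covers the
	 * last sharer.  Waiters or a lost race go to the hard case above.
	 */
	if ((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0 ||
	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
		_sx_sunlock_hard(sx, file, line);
}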