Code Example #1
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wake up any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

	if (wakeup_swapper)
		kick_proc0();
}
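
For context, here is how a hypothetical caller might exercise this downgrade path through the public sx(9) macros (sx_xlock, sx_downgrade, and sx_sunlock wrap the underscore-prefixed functions above with file/line information). This is a minimal sketch: the lock, the data field, and the function name are illustrative, not taken from the source.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_sx;	/* assumed initialized elsewhere via sx_init() */
static int example_data;

static int
example_update_then_read(void)
{
	int snapshot;

	sx_xlock(&example_sx);
	example_data++;			/* mutate under the exclusive lock */
	sx_downgrade(&example_sx);	/* become the single sharer; other readers may now enter */
	snapshot = example_data;	/* still protected against writers */
	sx_sunlock(&example_sx);
	return (snapshot);
}

The point of the downgrade is the middle step: the writer keeps a consistent view of the data it just built while letting blocked readers in immediately, instead of fully releasing and re-acquiring the lock in shared mode.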
Code Example #2
File: kern_sx.c  Project: jmgurney/freebsd
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
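
A common caller-side pattern builds on the fact that the try-upgrade fails whenever other sharers are present: fall back to dropping the shared lock and acquiring the exclusive lock outright, then re-validate anything that was read under the shared lock. A hedged sketch (example_sx and example_promote are illustrative; sx_slock, sx_try_upgrade, sx_xlock, sx_sunlock, and sx_xunlock are the standard sx(9) macros):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_sx;	/* assumed initialized elsewhere via sx_init() */

static void
example_promote(void)
{
	sx_slock(&example_sx);
	/* ... inspect shared state ... */
	if (!sx_try_upgrade(&example_sx)) {
		/*
		 * Another sharer (or a lost cmpset race) blocked the
		 * upgrade.  Drop the shared lock and take the exclusive
		 * lock from scratch; anything read above must be
		 * re-validated here, since other threads may have run.
		 */
		sx_sunlock(&example_sx);
		sx_xlock(&example_sx);
	}
	/* ... modify state under the exclusive lock ... */
	sx_xunlock(&example_sx);
}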
Code Example #3
File: kern_sx.c  Project: jmgurney/freebsd
/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	for (;;) {
		x = sx->sx_lock;

		/*
		 * We should never have shared waiters while at least one
		 * thread holds a shared lock.
		 */
		KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
		    ("%s: waiting sharers", __func__));

		/*
		 * See if there is more than one shared lock held.  If
		 * so, just drop one and return.
		 */
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, sx, (void *)x,
					    (void *)(x - SX_ONE_SHARER));
				break;
			}
			continue;
		}

		/*
		 * If there aren't any waiters for an exclusive lock,
		 * then try to drop it quickly.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			MPASS(x == SX_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, sx);
				break;
			}
			continue;
		}

		/*
		 * At this point, there should just be one sharer with
		 * exclusive waiters.
		 */
		MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

		sleepq_lock(&sx->lock_object);

		/*
		 * The wakeup semantic here is quite simple: just wake up
		 * all the exclusive waiters.  Note that the state of the
		 * lock could have changed, so if the cmpset fails, loop
		 * back and retry.
		 */
		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
		    SX_LOCK_UNLOCKED)) {
			sleepq_release(&sx->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on"
			    " exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		sleepq_release(&sx->lock_object);
		if (wakeup_swapper)
			kick_proc0();
		break;
	}
}
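
The cmpset transitions in all three examples are easier to follow with the lock-word layout in mind: the low bits of sx_lock are flags, and the sharer count sits above SX_SHARERS_SHIFT. Below is a minimal userland sketch of that encoding; the macro values mirror what sys/sx.h defines, but treat them as an assumption of this illustration rather than a contract.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout, mirroring sys/sx.h: the low bits are flags. */
#define	SX_LOCK_SHARED			0x01
#define	SX_LOCK_SHARED_WAITERS		0x02
#define	SX_LOCK_EXCLUSIVE_WAITERS	0x04
#define	SX_LOCK_RECURSED		0x08
#define	SX_LOCK_FLAGMASK		0x0f
#define	SX_SHARERS_SHIFT		4
#define	SX_SHARERS(x)		(((x) & ~(uintptr_t)SX_LOCK_FLAGMASK) >> SX_SHARERS_SHIFT)
#define	SX_SHARERS_LOCK(x)	(((uintptr_t)(x) << SX_SHARERS_SHIFT) | SX_LOCK_SHARED)
#define	SX_ONE_SHARER		((uintptr_t)1 << SX_SHARERS_SHIFT)

int
main(void)
{
	/*
	 * Two sharers plus blocked writers: the state handled by the
	 * SX_SHARERS(x) > 1 branch in _sx_sunlock_hard above.
	 */
	uintptr_t x = SX_SHARERS_LOCK(2) | SX_LOCK_EXCLUSIVE_WAITERS;

	printf("sharers=%ju excl_waiters=%d\n",
	    (uintmax_t)SX_SHARERS(x),
	    (int)((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0));

	/*
	 * Dropping one sharer is a plain subtraction on the lock word;
	 * the waiter flags in the low bits are preserved untouched.
	 */
	x -= SX_ONE_SHARER;
	printf("after -SX_ONE_SHARER: sharers=%ju excl_waiters=%d\n",
	    (uintmax_t)SX_SHARERS(x),
	    (int)((x & SX_LOCK_EXCLUSIVE_WAITERS) != 0));
	return (0);
}

Because the count and the flags share one word, a single atomic cmpset (or, for the multi-sharer case, a subtraction applied via cmpset) can move the lock between any two of these states, which is exactly what the fast paths in the functions above rely on.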