Example #1
/*
 * mutex_destroy() tears down a mutex.  An adaptive mutex must be either
 * unheld or held by the caller, and must have no waiters; anything else
 * indicates a bug and triggers a panic.
 */
void
mutex_destroy(kmutex_t *mp)
{
	mutex_impl_t *lp = (mutex_impl_t *)mp;

	if (lp->m_owner == 0 && !MUTEX_HAS_WAITERS(lp)) {
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_SPIN(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		MUTEX_DESTROY(lp);
	} else if (MUTEX_TYPE_ADAPTIVE(lp)) {
		LOCKSTAT_RECORD0(LS_MUTEX_DESTROY_RELEASE, lp);
		if (MUTEX_OWNER(lp) != curthread)
			mutex_panic("mutex_destroy: not owner", lp);
		if (MUTEX_HAS_WAITERS(lp)) {
			turnstile_t *ts = turnstile_lookup(lp);
			turnstile_exit(lp);
			if (ts != NULL)
				mutex_panic("mutex_destroy: has waiters", lp);
		}
		MUTEX_DESTROY(lp);
	} else {
		mutex_panic("mutex_destroy: bad mutex", lp);
	}
}
Example #2
/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);

	/*
	 * Try to switch from one shared lock to an exclusive lock.  We need
	 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
	 * we will wake up the exclusive waiters when we drop the lock.
	 */
	x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
	success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
	    (uintptr_t)curthread | x);
	LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}
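
The entire fast path above is a single compare-and-swap: trade the caller's lone shared reference for exclusive ownership while carrying the SX_LOCK_EXCLUSIVE_WAITERS bit along unchanged. Below is a minimal userspace sketch of the same idea in C11 atomics; the toy_sxlock_t word and the TOY_* flags are illustrative assumptions, not the real sx(9) layout.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_XWAITERS	((uintptr_t)1 << 0)	/* writers are queued */
#define TOY_WRITER	((uintptr_t)1 << 1)	/* exclusively owned */
#define TOY_READER	((uintptr_t)1 << 2)	/* one unit of reader count */

typedef struct { _Atomic uintptr_t word; } toy_sxlock_t;

/*
 * Non-blocking upgrade: succeeds only if the caller is the sole reader.
 * The exclusive-waiters bit, if set, rides along unchanged so the
 * eventual unlock still wakes the queued writers.
 */
static bool
toy_try_upgrade(toy_sxlock_t *l)
{
	uintptr_t w = atomic_load_explicit(&l->word, memory_order_relaxed);
	uintptr_t waiters = w & TOY_XWAITERS;
	uintptr_t expected = TOY_READER | waiters;	/* exactly one reader */

	/* One CAS; acquire so the writer's section cannot move above it. */
	return (atomic_compare_exchange_strong_explicit(&l->word, &expected,
	    TOY_WRITER | waiters, memory_order_acquire, memory_order_relaxed));
}
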
Example #3
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
	uintptr_t x;
	int wakeup_swapper;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (sx_recursed(sx))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	/*
	 * Try to switch from an exclusive lock with no shared waiters
	 * to one sharer with no shared waiters.  If there are
	 * exclusive waiters, we don't need to lock the sleep queue so
	 * long as we preserve the flag.  We do one quick try and if
	 * that fails we grab the sleepq lock to keep the flags from
	 * changing and do it the slow way.
	 *
	 * We have to lock the sleep queue if there are shared waiters
	 * so we can wake them up.
	 */
	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED_WAITERS) &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
		LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
		return;
	}

	/*
	 * Lock the sleep queue so we can read the waiters bits
	 * without any races and wakeup any shared waiters.
	 */
	sleepq_lock(&sx->lock_object);

	/*
	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
	 * shared lock.  If there are any shared waiters, wake them up.
	 */
	wakeup_swapper = 0;
	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(LS_SX_DOWNGRADE_DOWNGRADE, sx);

	if (wakeup_swapper)
		kick_proc0();
}
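
The downgrade is the reverse transition: one release-ordered update that swaps exclusive ownership for a single shared reference while preserving the exclusive-waiters flag. Here is a minimal sketch of that fast path, reusing the hypothetical toy_sxlock_t layout from the sketch after Example #2; the sleep-queue locking and the shared-waiter wake-up of the slow path are deliberately left out.

#include <stdatomic.h>
#include <stdint.h>

#define TOY_XWAITERS	((uintptr_t)1 << 0)	/* writers are queued */
#define TOY_WRITER	((uintptr_t)1 << 1)	/* exclusively owned */
#define TOY_READER	((uintptr_t)1 << 2)	/* one unit of reader count */

typedef struct { _Atomic uintptr_t word; } toy_sxlock_t;

/* Caller must own the lock exclusively and must not have recursed. */
static void
toy_downgrade(toy_sxlock_t *l)
{
	uintptr_t x = atomic_load_explicit(&l->word, memory_order_relaxed);

	/*
	 * Writer -> one reader in a single release CAS, carrying the
	 * exclusive-waiters bit so a later unlock still wakes writers.
	 * The loop retries only if a waiter set the bit between the load
	 * and the CAS (the real code takes the sleepq lock instead).
	 */
	while (!atomic_compare_exchange_weak_explicit(&l->word, &x,
	    TOY_READER | (x & TOY_XWAITERS),
	    memory_order_release, memory_order_relaxed))
		;
}
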
Example #4
/*
 * Simple C support for the cases where spin locks miss on the first try.
 */
void
lock_set_spin(lock_t *lp)
{
	int spin_count = 1;
	int backoff;	/* current backoff */
	int backctr;	/* ctr for backoff */

	if (panicstr)
		return;

	if (ncpus == 1)
		panic("lock_set: %p lock held and only one CPU", lp);

	/* Use the platform's own lock-delay routine if it provides one. */
	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
		if (panicstr)
			return;
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the lock data structure.  The spin_count test
		 * and the call to nulldev prevent the compiler from
		 * optimizing away the delay loop.
		 */
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			/* delay */
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			}

			backoff = backoff << 1;		/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}
			SMT_PAUSE();
		}
	}

	if (spin_count) {
		LOCKSTAT_RECORD(LS_LOCK_SET_SPIN, lp, spin_count);
	}

	LOCKSTAT_RECORD0(LS_LOCK_SET_ACQUIRE, lp);
}
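
The loop above is the classic test-and-test-and-set spin with capped exponential backoff: read the lock word cheaply, only attempt the atomic grab when it looks free, and back off for an exponentially growing delay (up to a cap) between attempts. A minimal portable sketch in C11 atomics; toy_spinlock_t, the TOY_BACKOFF_* constants, and cpu_pause() are assumptions standing in for lock_t, BACKOFF_BASE/BACKOFF_CAP, and SMT_PAUSE().

#include <stdatomic.h>
#include <stdbool.h>

#define TOY_BACKOFF_BASE	64
#define TOY_BACKOFF_CAP		4096

typedef struct { _Atomic bool held; } toy_spinlock_t;

/* Stand-in for SMT_PAUSE(); on x86 this is the "pause" spin-loop hint. */
static inline void
cpu_pause(void)
{
#if defined(__i386__) || defined(__x86_64__)
	__asm__ __volatile__("pause");
#endif
}

static void
toy_spin_lock(toy_spinlock_t *l)
{
	int backoff = TOY_BACKOFF_BASE;
	int i;

	for (;;) {
		/*
		 * Test-and-test-and-set: only attempt the atomic swap when
		 * a plain load says the lock looks free, just as the
		 * LOCK_HELD() check above avoids hammering lock_spin_try().
		 */
		if (!atomic_load_explicit(&l->held, memory_order_relaxed) &&
		    !atomic_exchange_explicit(&l->held, true,
		    memory_order_acquire))
			return;

		/* Exponential backoff before touching the line again. */
		for (i = 0; i < backoff; i++)
			cpu_pause();
		backoff <<= 1;
		if (backoff > TOY_BACKOFF_CAP)
			backoff = TOY_BACKOFF_CAP;
	}
}
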
Example #5
/*
 * mutex_vector_exit() is called from mutex_exit() if the lock is not
 * adaptive, has waiters, or is not owned by the current thread (panic).
 */
void
mutex_vector_exit(mutex_impl_t *lp)
{
	turnstile_t *ts;

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_clear_splx(&lp->m_spin.m_spinlock, lp->m_spin.m_oldspl);
		return;
	}

	if (MUTEX_OWNER(lp) != curthread) {
		mutex_panic("mutex_exit: not owner", lp);
		return;
	}

	ts = turnstile_lookup(lp);
	MUTEX_CLEAR_LOCK_AND_WAITERS(lp);
	if (ts == NULL)
		turnstile_exit(lp);
	else
		turnstile_wakeup(ts, TS_WRITER_Q, ts->ts_waiters, NULL);
	LOCKSTAT_RECORD0(LS_MUTEX_EXIT_RELEASE, lp);
}
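
The essential ordering on the release side is: clear the owner and the waiters bit in one atomic, release-ordered step, then decide from the old value whether anyone needs waking. A minimal sketch of that shape, using the hypothetical TOY_* layout from the earlier sketches; toy_wake_waiters() is an invented stub standing in for the turnstile wake-up, and the sketch omits the turnstile_lookup()/turnstile_exit() protocol that makes the real code race-free.

#include <stdatomic.h>
#include <stdint.h>

#define TOY_XWAITERS	((uintptr_t)1 << 0)	/* someone is blocked */

typedef struct { _Atomic uintptr_t word; } toy_mutex_t;

/* Invented stub: a real implementation would wake the sleepers here. */
static void
toy_wake_waiters(toy_mutex_t *m)
{
	(void)m;
}

static void
toy_mutex_exit(toy_mutex_t *m)
{
	/*
	 * Clear ownership and the waiters bit in one release-ordered swap
	 * (the analogue of MUTEX_CLEAR_LOCK_AND_WAITERS), then wake the
	 * sleepers only if the old value showed any.
	 */
	uintptr_t old = atomic_exchange_explicit(&m->word, 0,
	    memory_order_release);

	if (old & TOY_XWAITERS)
		toy_wake_waiters(m);
}
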
Example #6
/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
Example #7
/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
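
From the caller's side, a failed try-upgrade is not an error; the usual idiom is to drop the read lock, take the write lock, and revalidate whatever was observed under the read lock, because the state may have changed while no lock was held. A short sketch of that pattern against the rwlock(9) KPI used above; struct foo and foo_update() are hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Hypothetical object protected by an rwlock. */
struct foo {
	struct rwlock	f_lock;
	int		f_state;
};

static void
foo_update(struct foo *fp, int new_state)
{
	rw_rlock(&fp->f_lock);
	if (fp->f_state == new_state) {
		rw_runlock(&fp->f_lock);
		return;
	}
	if (!rw_try_upgrade(&fp->f_lock)) {
		/* Upgrade failed: other readers are present.  Fall back. */
		rw_runlock(&fp->f_lock);
		rw_wlock(&fp->f_lock);
		if (fp->f_state == new_state) {	/* revalidate */
			rw_wunlock(&fp->f_lock);
			return;
		}
	}
	fp->f_state = new_state;
	rw_wunlock(&fp->f_lock);
}
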
Example #8
/*
 * mutex_vector_enter() is called from the assembly mutex_enter() routine
 * if the lock is held or is not of type MUTEX_ADAPTIVE.
 */
void
mutex_vector_enter(mutex_impl_t *lp)
{
	kthread_id_t	owner;
	hrtime_t	sleep_time = 0;	/* how long we slept */
	uint_t		spin_count = 0;	/* how many times we spun */
	cpu_t 		*cpup, *last_cpu;
	extern cpu_t	*cpu_list;
	turnstile_t	*ts;
	volatile mutex_impl_t *vlp = (volatile mutex_impl_t *)lp;
	int		backoff;	/* current backoff */
	int		backctr;	/* ctr for backoff */
	int		sleep_count = 0;

	ASSERT_STACK_ALIGNED();

	if (MUTEX_TYPE_SPIN(lp)) {
		lock_set_spl(&lp->m_spin.m_spinlock, lp->m_spin.m_minspl,
		    &lp->m_spin.m_oldspl);
		return;
	}

	if (!MUTEX_TYPE_ADAPTIVE(lp)) {
		mutex_panic("mutex_enter: bad mutex", lp);
		return;
	}

	/*
	 * Adaptive mutexes must not be acquired from above LOCK_LEVEL.
	 * We can migrate after loading CPU but before checking CPU_ON_INTR,
	 * so we must verify by disabling preemption and loading CPU again.
	 */
	cpup = CPU;
	if (CPU_ON_INTR(cpup) && !panicstr) {
		kpreempt_disable();
		if (CPU_ON_INTR(CPU))
			mutex_panic("mutex_enter: adaptive at high PIL", lp);
		kpreempt_enable();
	}

	CPU_STATS_ADDQ(cpup, sys, mutex_adenters, 1);

	if (&plat_lock_delay) {
		backoff = 0;
	} else {
		backoff = BACKOFF_BASE;
	}

	for (;;) {
spin:
		spin_count++;
		/*
		 * Add an exponential backoff delay before trying again
		 * to touch the mutex data structure.
		 * The spin_count test and the call to nulldev prevent the
		 * compiler from optimizing away the delay loop.
		 */
		if (&plat_lock_delay) {
			plat_lock_delay(&backoff);
		} else {
			for (backctr = backoff; backctr; backctr--) {
				if (!spin_count) (void) nulldev();
			}	/* delay */
			backoff = backoff << 1;			/* double it */
			if (backoff > BACKOFF_CAP) {
				backoff = BACKOFF_CAP;
			}

			SMT_PAUSE();
		}

		if (panicstr)
			return;

		if ((owner = MUTEX_OWNER(vlp)) == NULL) {
			if (mutex_adaptive_tryenter(lp))
				break;
			continue;
		}

		if (owner == curthread)
			mutex_panic("recursive mutex_enter", lp);

		/*
		 * If the lock is held but the owner is not yet set, spin.
		 * (Only relevant for platforms that don't have CAS.)
		 */
		if (owner == MUTEX_NO_OWNER)
			continue;

		/*
		 * When searching the other CPUs, start with the one where
		 * we last saw the owner thread.  If owner is running, spin.
		 *
		 * We must disable preemption at this point to guarantee
		 * that the list doesn't change while we traverse it
		 * without the cpu_lock mutex.  While preemption is
		 * disabled, we must revalidate our cached cpu pointer.
		 */
		kpreempt_disable();
		if (cpup->cpu_next == NULL)
			cpup = cpu_list;
		last_cpu = cpup;	/* mark end of search */
		do {
			if (cpup->cpu_thread == owner) {
				kpreempt_enable();
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		kpreempt_enable();

		/*
		 * The owner appears not to be running, so block.
		 * See the Big Theory Statement for memory ordering issues.
		 */
		ts = turnstile_lookup(lp);
		MUTEX_SET_WAITERS(lp);
		membar_enter();

		/*
		 * Recheck whether owner is running after waiters bit hits
		 * global visibility (above).  If owner is running, spin.
		 *
		 * Since we are at ipl DISP_LEVEL, kernel preemption is
		 * disabled, however we still need to revalidate our cached
		 * cpu pointer to make sure the cpu hasn't been deleted.
		 */
		if (cpup->cpu_next == NULL)
			last_cpu = cpup = cpu_list;
		do {
			if (cpup->cpu_thread == owner) {
				turnstile_exit(lp);
				goto spin;
			}
		} while ((cpup = cpup->cpu_next) != last_cpu);
		membar_consumer();

		/*
		 * If owner and waiters bit are unchanged, block.
		 */
		if (MUTEX_OWNER(vlp) == owner && MUTEX_HAS_WAITERS(vlp)) {
			sleep_time -= gethrtime();
			(void) turnstile_block(ts, TS_WRITER_Q, lp,
			    &mutex_sobj_ops, NULL, NULL);
			sleep_time += gethrtime();
			sleep_count++;
		} else {
			turnstile_exit(lp);
		}
	}

	ASSERT(MUTEX_OWNER(lp) == curthread);

	if (sleep_time != 0) {
		/*
		 * Note that sleep_time is the total time we spent blocked,
		 * summed over every sleep in the loop above.
		 */
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_BLOCK, lp, sleep_time);
	}

	/*
	 * We do not count a sleep as a spin.
	 */
	if (spin_count > sleep_count)
		LOCKSTAT_RECORD(LS_MUTEX_ENTER_SPIN, lp,
		    spin_count - sleep_count);

	LOCKSTAT_RECORD0(LS_MUTEX_ENTER_ACQUIRE, lp);
}
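
What makes the mutex adaptive is the decision in the middle of this loop: keep spinning while the owner is running on some CPU (it will probably release soon), and go to sleep only when it is not. A heavily simplified userspace sketch of that policy; toy_amutex_t, toy_owner_is_running(), and toy_block() are illustrative assumptions, not the illumos data structures, and the sketch omits the backoff, the waiters bit, and the owner recheck that the kernel performs before committing to sleep.

#include <sched.h>		/* sched_yield() as a crude stand-in for blocking */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct { _Atomic uintptr_t owner; } toy_amutex_t;

/* Invented stand-in: the kernel walks the CPU list to answer this. */
static bool
toy_owner_is_running(uintptr_t owner)
{
	(void)owner;
	return (false);		/* pretend the owner is off-CPU */
}

/* Invented stand-in for turnstile_block(). */
static void
toy_block(void)
{
	(void)sched_yield();
}

/* self: any nonzero per-thread identity (the kernel uses curthread). */
static void
toy_adaptive_enter(toy_amutex_t *m, uintptr_t self)
{
	for (;;) {
		uintptr_t expected = 0;

		/* Try to take the lock: 0 -> our identity, acquire order. */
		if (atomic_compare_exchange_weak_explicit(&m->owner,
		    &expected, self, memory_order_acquire,
		    memory_order_relaxed))
			return;

		/*
		 * Adaptive policy: spin while the owner is on a CPU,
		 * otherwise block.  The kernel additionally sets a waiters
		 * bit and rechecks the owner before sleeping on the
		 * turnstile, as shown in the loop above.
		 */
		if (!toy_owner_is_running(expected))
			toy_block();
	}
}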