Example #1
void
__mp_lock(struct __mp_lock *mpl)
{
	register_t sr;
	struct cpu_info *ci = curcpu();

	/*
	 * Please notice that mpl_count gets incremented twice for the
	 * first lock. This is on purpose. The way we release the lock
	 * in mp_unlock is to decrement the mpl_count and then check if
	 * the lock should be released. Since mpl_count is what we're
	 * spinning on, decrementing it in mpl_unlock to 0 means that
	 * we can't clear mpl_cpu, because we're no longer holding the
	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
	 * safer to clear it and besides, setting mpl_count to 2 on the
	 * first lock makes most of this code much simpler.
	 */
	while (1) {
		sr = disableintr();
		if (__cpu_cas(&mpl->mpl_count, 0, 1) == 0) {
			mips_sync();
			mpl->mpl_cpu = ci;
		}

		if (mpl->mpl_cpu == ci) {
			mpl->mpl_count++;
			setsr(sr);
			break;
		}
		setsr(sr);
		
		__mp_lock_spin(mpl);
	}
}
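For context, here is a minimal sketch of the matching __mp_unlock that the comment above describes, following the usual pattern for this MD code (the exact implementation may differ); it reuses the disableintr()/setsr() and mips_sync() primitives seen above:

void
__mp_unlock(struct __mp_lock *mpl)	/* sketch, not the verbatim MD code */
{
	register_t sr;

	sr = disableintr();
	if (--mpl->mpl_count == 1) {
		/* Last reference: clear the owner while still holding the lock. */
		mpl->mpl_cpu = NULL;
		mips_sync();
		mpl->mpl_count = 0;	/* the real release; waiters spin/CAS on this */
	}
	setsr(sr);
}

Decrementing down to 1 rather than 0 keeps the lock held while mpl_cpu is cleared; only the final store of 0 lets another CPU's __cpu_cas succeed, which is exactly why the acquire path sets mpl_count to 2 on the first lock.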
Example #2
void
__mp_lock(struct __mp_lock *mpl)
{
	struct __mp_lock_cpu *cpu = &mpl->mpl_cpus[cpu_number()];
	u_int64_t s;

	/*
	 * On the first (outermost) acquisition, take a ticket by
	 * atomically bumping mpl_users; recursive acquisitions only
	 * bump the per-CPU depth count.
	 */
	s = intr_disable();
	if (cpu->mplc_depth++ == 0)
		cpu->mplc_ticket = atomic_inc_int_nv(&mpl->mpl_users);
	intr_restore(s);

	/* Wait until our ticket is served, then order subsequent accesses. */
	__mp_lock_spin(mpl, cpu->mplc_ticket);
	sparc_membar(LoadLoad | LoadStore);
}
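Example #2 is a recursive ticket lock: each outermost caller takes a ticket from mpl_users and then waits for its turn. A minimal sketch of the __mp_lock_spin half of that handshake under this scheme (the mpl_ticket "now serving" field name is an assumption here, not taken from the code above):

static inline void
__mp_lock_spin(struct __mp_lock *mpl, u_int me)	/* sketch */
{
	/*
	 * Busy-wait until our ticket number comes up; read the counter
	 * as volatile so the load is repeated on every iteration.
	 */
	while (*(volatile u_int *)&mpl->mpl_ticket != me)
		;	/* a CPU pause/yield hint could go here */
}

Because tickets are handed out and served in FIFO order, the acquisition path above needs only a single atomic increment instead of a CAS retry loop, and contending CPUs take the lock in arrival order.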
Example #3
void
__mp_lock(struct __mp_lock *mpl)
{
	struct cpu_info *ci = curcpu();
	uint32_t psr;
	uint gcsr;

	/*
	 * Please notice that mpl_count gets incremented twice for the
	 * first lock. This is on purpose. The way we release the lock
	 * in mp_unlock is to decrement the mpl_count and then check if
	 * the lock should be released. Since mpl_count is what we're
	 * spinning on, decrementing it in mpl_unlock to 0 means that
	 * we can't clear mpl_cpu, because we're no longer holding the
	 * lock. In theory mpl_cpu doesn't need to be cleared, but it's
	 * safer to clear it and besides, setting mpl_count to 2 on the
	 * first lock makes most of this code much simpler.
	 */

	for (;;) {
		psr = (*ci->ci_mp_atomic_begin)(&mpl->mpl_lock, &gcsr);

		if (mpl->mpl_count == 0) {
			mpl->mpl_count = 1;
			mpl->mpl_cpu = ci;
		}
		if (mpl->mpl_cpu == ci) {
			mpl->mpl_count++;
			(*ci->ci_mp_atomic_end)(psr, &mpl->mpl_lock, gcsr);
			break;
		}
		(*ci->ci_mp_atomic_end)(psr, &mpl->mpl_lock, gcsr);

		__mp_lock_spin(mpl);
	}
}
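Example #3 delegates atomicity to per-CPU hooks: the plain read-modify-write of mpl_count and mpl_cpu is only safe because ci_mp_atomic_begin/ci_mp_atomic_end bracket it. One plausible, purely hypothetical shape for such hooks (the names get_psr, set_psr, PSR_IND, __cpu_simple_lock and the __cpu_simple_lock_t type are assumptions, not taken from the code above) is to mask interrupts and hold a low-level spinlock across the window:

uint32_t
mp_atomic_begin(__cpu_simple_lock_t *lock, uint *csr)	/* hypothetical sketch */
{
	uint32_t psr;

	psr = get_psr();		/* save the current status register */
	set_psr(psr | PSR_IND);		/* mask interrupts on this CPU */
	__cpu_simple_lock(lock);	/* serialize against other CPUs */
	*csr = 0;			/* MD scratch state, unused in this sketch */
	return psr;
}

void
mp_atomic_end(uint32_t psr, __cpu_simple_lock_t *lock, uint csr)	/* hypothetical sketch */
{
	__cpu_simple_unlock(lock);	/* drop the low-level lock first */
	set_psr(psr);			/* then restore the saved interrupt state */
}

With the window made atomic this way, the body of the loop can use ordinary loads and stores, and the indirection through cpu_info lets each board or CPU model supply whatever primitive it actually has.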