Example #1
/*
 * Make sure any stale selectors are cleared from the segment registers
 * by putting KDS_SEL (the kernel's default %ds gdt selector) into them.
 * This is necessary because the kernel itself does not use %es, %fs, or
 * %ds. (%cs and %ss are necessary, and are set up by the kernel - along with
 * %gs - to point to the current cpu struct.) If we enter kmdb while in the
 * kernel and resume with a stale ldt or brandz selector sitting there in a
 * segment register, kmdb will #gp fault if the stale selector points to,
 * for example, an ldt in the context of another process.
 *
 * WARNING: Intel and AMD chips behave differently when storing
 * the null selector into %fs and %gs while in long mode. On AMD
 * chips fsbase and gsbase are not cleared. But on Intel chips, storing
 * a null selector into %fs or %gs has the side effect of clearing
 * fsbase or gsbase. For that reason we use KDS_SEL, which has
 * consistent behavior between AMD and Intel.
 *
 * Caller responsible for preventing cpu migration.
 */
void
reset_sregs(void)
{
	ulong_t kgsbase = (ulong_t)CPU;

	ASSERT(curthread->t_preempt != 0 || getpil() >= DISP_LEVEL);

	cli();
	__set_gs(KGS_SEL);

	/*
	 * restore kernel gsbase
	 */
#if defined(__xpv)
	xen_set_segment_base(SEGBASE_GS_KERNEL, kgsbase);
#else
	wrmsr(MSR_AMD_GSBASE, kgsbase);
#endif

	sti();

	__set_ds(KDS_SEL);
	__set_es(0 | SEL_KPL);	/* selector RPL not ring 0 on hypervisor */
	__set_fs(KFS_SEL);
}
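
A minimal usage sketch for the migration requirement noted above (the caller name is hypothetical, not from the source): pinning to the current CPU with kpreempt_disable() satisfies the ASSERT on t_preempt before the segment registers are reloaded.

/*
 * Hypothetical caller sketch: reset_sregs() requires that the caller
 * prevent cpu migration, so bracket the call with kpreempt_disable()
 * and kpreempt_enable().
 */
static void
example_reset_sregs_caller(void)
{
	kpreempt_disable();	/* pin to the current CPU */
	reset_sregs();		/* reload %ds, %es, %fs, %gs and kernel gsbase */
	kpreempt_enable();
}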
Example #2
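/*
 * Cross-call the CPUs in cpu_ready_set with the given handler.  If we are
 * already at or above XCALL_PIL, temporarily drop the PIL to XCALL_PIL - 1
 * so the cross-call can be sent, then restore the original PIL afterwards.
 */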
static void
i_cpr_xcall(xcfunc_t func)
{
	uint_t pil, reset_pil;

	pil = getpil();
	if (pil < XCALL_PIL) {
		reset_pil = 0;
	} else {
		reset_pil = 1;
		setpil(XCALL_PIL - 1);
	}
	xc_some(cpu_ready_set, func, 0, 0);
	if (reset_pil)
		setpil(pil);
}
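
The save/cap/restore PIL pattern used above can be factored into a small helper; a minimal sketch, assuming only the getpil()/setpil() interfaces already used in i_cpr_xcall() (the helper name is hypothetical):

/*
 * Hypothetical helper (not in the source): run a callback with the PIL
 * capped below XCALL_PIL, restoring the caller's original PIL afterwards.
 */
static void
run_below_xcall_pil(void (*fn)(void))
{
	uint_t pil = getpil();

	if (pil >= XCALL_PIL)
		setpil(XCALL_PIL - 1);
	fn();
	if (pil >= XCALL_PIL)
		setpil(pil);
}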
Example #3
/*
 * Gets called when the softcall queue is not moving forward.  We choose
 * a CPU to poke, skipping any CPUs that have already been poked.
 */
static int
softcall_choose_cpu(void)
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * Start the search from the current CPU.
	 */
	cp = cplist;
	do {
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0)
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be
			 * large.
			 */
			if (cpuid != -1) {
				/*
				 * Also record the previously chosen
				 * CPU, since it has the same
				 * interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a single best CPU to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * Mark the chosen CPUs in the cpuset before poking them.  None of
	 * these bits can already be set (such CPUs were skipped above), so
	 * the XOR below effectively acts as a set union.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at a low PIL, we may be preempted
	 * before we raise the PIL here.  That should be okay: we are only
	 * going to poke the chosen CPUs, and at most another thread will
	 * also start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
Example #4
/*
 * Gets called when the softcall queue is not moving forward.  We choose
 * a CPU to poke, skipping any CPUs that have already been poked.
 */
static int
softcall_choose_cpu(void)
{
	cpu_t *cplist = CPU;
	cpu_t *cp;
	int intr_load = INT_MAX;
	int cpuid = -1;
	cpuset_t poke;
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * Start the search from the current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if:
		 *   - in cpuset already
		 *   - CPU is not accepting interrupts
		 *   - CPU is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;
#if defined(__x86)
		/*
		 * Don't select this CPU if a hypervisor indicates it
		 * isn't currently scheduled onto a physical cpu.  We are
		 * looking for a cpu that can respond quickly and the time
		 * to get the virtual cpu scheduled and switched to running
		 * state is likely to be relatively lengthy.
		 */
		if (vcpu_on_pcpu(cp->cpu_id) == VCPU_NOT_ON_PCPU)
			continue;
#endif	/* __x86 */

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge the level1 interrupt.
			 * The list of such CPUs should not be
			 * large.
			 */
			if (cpuid != -1) {
				/*
				 * Also record the previously chosen
				 * CPU, since it has the same
				 * interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a single best CPU to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * Mark the chosen CPUs in the cpuset before poking them.  None of
	 * these bits can already be set (such CPUs were skipped above), so
	 * the XOR below effectively acts as a set union.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at a low PIL, we may be preempted
	 * before we raise the PIL here.  That should be okay: we are only
	 * going to poke the chosen CPUs, and at most another thread will
	 * also start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}
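
A hedged caller sketch (the function name softcall_kick() and the exact call site are assumptions, not from the source): the caller must already be at or above DISP_LEVEL and must hold softcall_lock, and softcall_choose_cpu() drops that lock on every return path.

/*
 * Hypothetical caller sketch: when the softcall queue appears stuck and
 * more than one CPU is available, ask softcall_choose_cpu() to poke
 * another CPU.  Note that it exits softcall_lock itself on both paths.
 */
static void
softcall_kick(void)
{
	ASSERT(getpil() >= DISP_LEVEL);

	mutex_enter(&softcall_lock);
	if (ncpus > 1)
		(void) softcall_choose_cpu();	/* drops softcall_lock */
	else
		mutex_exit(&softcall_lock);
}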
Example #5
void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}
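
A minimal usage sketch (assumed, not taken from the source): entry into the PROM is bracketed by kern_preprom() and the kern_postprom() referenced in the comments above, so only one CPU is inside the PROM at a time; prom_do_something() is a placeholder for the actual PROM service being invoked.

/*
 * Hypothetical usage sketch: serialize a call into the PROM.
 */
static void
example_prom_call(void)
{
	kern_preprom();		/* take (or nest) the prom_cpu lock */
	prom_do_something();	/* placeholder for the real PROM call */
	kern_postprom();	/* drop our hold on the PROM */
}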