Example #1
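The examples below collect kernel callers of fpusave_cpu(), which writes back or discards the FPU state currently live on a CPU. In this first one, an Alpha IPI handler discards the local FPU state (the 0 argument requests a discard rather than a save); the CPUF_FPUSAVE test appears to skip the discard while a save is already in progress.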
void
alpha_ipi_discard_fpu(struct cpu_info *ci, struct trapframe *framep)
{
    if (ci->ci_flags & CPUF_FPUSAVE)
        return;
    fpusave_cpu(ci, 0);
}
Example #2
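The Alpha halt IPI saves the local FPU state first (the 1 argument requests a save), raises the IPL to block further interrupts, and then halts the processor for good.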
void
alpha_ipi_halt(struct cpu_info *ci, struct trapframe *framep)
{
    SCHED_ASSERT_UNLOCKED();
    fpusave_cpu(ci, 1);
    (void)splhigh();

    cpu_halt();
    /* NOTREACHED */
}
Example #3
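sparc64's fpusave_lwp() covers the cross-CPU case: when the LWP's state is live on another CPU, it sends that CPU a save or drop IPI and spins, with a large iteration bound, until the remote handler clears ci_fplwp. On uniprocessor kernels only the local check remains.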
void
fpusave_lwp(struct lwp *l, bool save)
{
#ifdef MULTIPROCESSOR
    volatile struct cpu_info *ci;

    if (l == fplwp) {
        int s = intr_disable();
        fpusave_cpu(save);
        intr_restore(s);
        return;
    }

    for (ci = cpus; ci != NULL; ci = ci->ci_next) {
        int spincount;

        if (ci == curcpu() || !CPUSET_HAS(cpus_active, ci->ci_index))
            continue;
        if (ci->ci_fplwp != l)
            continue;
        sparc64_send_ipi(ci->ci_cpuid, save ?
                         sparc64_ipi_save_fpstate :
                         sparc64_ipi_drop_fpstate, (uintptr_t)l, 0);

        spincount = 0;
        while (ci->ci_fplwp == l) {
            membar_Sync();
            spincount++;
            if (spincount > 10000000)
                panic("fpusave_lwp ipi didn't");
        }
        break;
    }
#else
    if (l == fplwp)
        fpusave_cpu(save);
#endif
}
Example #4
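OpenBSD's amd64 halt IPI: save the FPU state, disable interrupts and the local APIC, flush caches with wbinvd(), clear CPUF_RUNNING so the CPU is no longer counted as active, and spin in hlt.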
void
x86_64_ipi_halt(struct cpu_info *ci)
{
	SCHED_ASSERT_UNLOCKED();
	KASSERT(!__mp_lock_held(&kernel_lock));
	
	fpusave_cpu(ci, 1);
	disable_intr();
	lapic_disable();
	wbinvd();
	ci->ci_flags &= ~CPUF_RUNNING;
	wbinvd();

	for (;;) {
		__asm volatile("hlt");
	}
}
Example #5
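NetBSD's x86 fpusave_lwp() is the more elaborate counterpart of Example 3. It re-checks pcb_fpcpu under splhigh() on every pass, handles the state-is-local case directly, and otherwise sends a synch-FPU IPI (via a Xen-specific call on XEN kernels) and spins until the owning CPU writes the state back.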
/*
 * Save l's FPU state, which may be on this processor or another processor.
 * It may take some time, so we avoid disabling preemption where possible.
 * The caller must ensure that the target LWP is stopped; otherwise
 * this routine may race against it.
 */
void
fpusave_lwp(struct lwp *l, bool save)
{
	struct cpu_info *oci;
	struct pcb *pcb;
	int s, spins, ticks;

	spins = 0;
	ticks = hardclock_ticks;
	for (;;) {
		s = splhigh();
		pcb = lwp_getpcb(l);
		oci = pcb->pcb_fpcpu;
		if (oci == NULL) {
			splx(s);
			break;
		}
		if (oci == curcpu()) {
			KASSERT(oci->ci_fpcurlwp == l);
			fpusave_cpu(save);
			splx(s);
			break;
		}
		splx(s);
#ifdef XEN
		if (xen_send_ipi(oci, XEN_IPI_SYNCH_FPU) != 0) {
			panic("xen_send_ipi(%s, XEN_IPI_SYNCH_FPU) failed.",
			    cpu_name(oci));
		}
#else /* XEN */
		x86_send_ipi(oci, X86_IPI_SYNCH_FPU);
#endif
		while (pcb->pcb_fpcpu == oci && ticks == hardclock_ticks) {
			x86_pause();
			spins++;
		}
		if (spins > 100000000) {
			panic("fpusave_lwp: did not");
		}
	}

	if (!save) {
		/* Ensure we restart with a clean slate. */
	 	l->l_md.md_flags &= ~MDL_USEDFPU;
	}
}
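Both fpusave_lwp() implementations share the same handoff shape: publish a request via IPI, then busy-wait until the owning CPU clears the ownership link, panicking if it never does. Below is a minimal user-space sketch of that pattern; every name in it (send_save_ipi, cpu_relax, remote_owner, die) is a hypothetical stand-in for the kernel primitives, not a real API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel primitives used above. */
static void send_save_ipi(void) { /* would ask the owning CPU to save */ }
static void cpu_relax(void) { /* would be x86_pause() or membar_Sync() */ }
static void die(const char *msg) { fprintf(stderr, "panic: %s\n", msg); abort(); }

/* Set by the "remote" side while it owns the state, cleared when it saves. */
static _Atomic(void *) remote_owner;

static void
wait_for_remote_save(void *token)
{
	long spins = 0;

	send_save_ipi();
	/* Spin until the remote handler clears the ownership link. */
	while (atomic_load_explicit(&remote_owner, memory_order_acquire) == token) {
		cpu_relax();
		if (++spins > 100000000L)
			die("remote CPU never released the FPU state");
	}
}

int
main(void)
{
	/* Nothing owns the state in this toy run, so the wait returns at once. */
	wait_for_remote_save(&remote_owner);
	return 0;
}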
Example #6
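The matching synch-FPU IPI handler on OpenBSD amd64: it saves the state only if the process named in the request (ci_fpsaveproc) still owns this CPU's FPU.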
void
x86_64_ipi_synch_fpu(struct cpu_info *ci)
{
	if (ci->ci_fpsaveproc == ci->ci_fpcurproc)
		fpusave_cpu(ci, 1);
}
Example #7
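The last example is NetBSD's x86 device-not-available (DNA) trap handler, which performs the lazy FPU switch: save any other LWP's live state, pull the current LWP's state back from a remote CPU if needed, then initialize a fresh context or restore the saved one.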
/*
 * Implement device not available (DNA) exception
 *
 * If we were the last lwp to use the FPU, we can simply return.
 * Otherwise, we save the previous state, if necessary, and restore
 * our last saved state.
 */
void
fpudna(struct cpu_info *ci)
{
	uint16_t cw;
	uint32_t mxcsr;
	struct lwp *l, *fl;
	struct pcb *pcb;
	int s;

	if (ci->ci_fpsaving) {
		/* Recursive trap. */
		x86_enable_intr();
		return;
	}

	/* Lock out IPIs and disable preemption. */
	s = splhigh();
	x86_enable_intr();

	/* Save state on current CPU. */
	l = ci->ci_curlwp;
	pcb = lwp_getpcb(l);
	fl = ci->ci_fpcurlwp;
	if (fl != NULL) {
		/*
		 * It seems we can get here on Xen even if we didn't
		 * switch lwps.  In that case, do nothing.
		 */
		if (fl == l) {
			KASSERT(pcb->pcb_fpcpu == ci);
			clts();
			splx(s);
			return;
		}
		KASSERT(fl != l);
		fpusave_cpu(true);
		KASSERT(ci->ci_fpcurlwp == NULL);
	}

	/* Save our state if on a remote CPU. */
	if (pcb->pcb_fpcpu != NULL) {
		/* Explicitly disable preemption before dropping spl. */
		KPREEMPT_DISABLE(l);
		splx(s);
		fpusave_lwp(l, true);
		KASSERT(pcb->pcb_fpcpu == NULL);
		s = splhigh();
		KPREEMPT_ENABLE(l);
	}

	/*
	 * Restore state on this CPU, or initialize.  Ensure that
	 * the entire update is atomic with respect to FPU-sync IPIs.
	 */
	clts();
	ci->ci_fpcurlwp = l;
	pcb->pcb_fpcpu = ci;
	if ((l->l_md.md_flags & MDL_USEDFPU) == 0) {
		fninit();
		cw = pcb->pcb_savefpu.fp_fxsave.fx_fcw;
		fldcw(&cw);
		mxcsr = pcb->pcb_savefpu.fp_fxsave.fx_mxcsr;
		x86_ldmxcsr(&mxcsr);
		l->l_md.md_flags |= MDL_USEDFPU;
	} else {
		/*
		 * AMD FPUs do not restore FIP, FDP, and FOP on fxrstor,
		 * leaking another process's execution history.  Clear them
		 * manually.
		 */
		static const double zero = 0.0;
		int status;
		/*
		 * Clear the ES bit in the x87 status word if it is currently
		 * set, in order to avoid causing a fault in the upcoming load.
		 */
		fnstsw(&status);
		if (status & 0x80)
			fnclex();
		/*
		 * Load the dummy variable into the x87 stack.  This mangles
		 * the x87 stack, but we don't care since we're about to call
		 * fxrstor() anyway.
		 */
		fldummy(&zero);
		fxrstor(&pcb->pcb_savefpu);
	}

	KASSERT(ci == curcpu());
	splx(s);
}
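fpudna() maintains a two-way link: ci_fpcurlwp on the CPU side and pcb_fpcpu on the LWP side point at each other exactly while that LWP's state is live in that CPU's FPU, which is why fpusave_lwp() checks one link and fpudna() the other. Here is a toy model of that invariant; the types and names (toy_cpu, toy_lwp, toy_bind, toy_unbind) are illustrative simplifications, not the kernel's.

#include <assert.h>
#include <stddef.h>

/* Simplified stand-ins for struct cpu_info and struct pcb. */
struct toy_lwp;
struct toy_cpu { struct toy_lwp *fpcurlwp; };
struct toy_lwp { struct toy_cpu *fpcpu; };

/* Bind: this CPU's FPU now holds l's live state (end of fpudna()). */
static void
toy_bind(struct toy_cpu *ci, struct toy_lwp *l)
{
	ci->fpcurlwp = l;
	l->fpcpu = ci;
}

/* Unbind: state written back to the PCB (what fpusave_cpu() does). */
static void
toy_unbind(struct toy_cpu *ci)
{
	if (ci->fpcurlwp != NULL) {
		ci->fpcurlwp->fpcpu = NULL;
		ci->fpcurlwp = NULL;
	}
}

int
main(void)
{
	struct toy_cpu cpu = { NULL };
	struct toy_lwp lwp = { NULL };

	toy_bind(&cpu, &lwp);
	assert(cpu.fpcurlwp == &lwp && lwp.fpcpu == &cpu);
	toy_unbind(&cpu);
	assert(cpu.fpcurlwp == NULL && lwp.fpcpu == NULL);
	return 0;
}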