Example #1
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS	status;

	if (sc->acpi_wakeaddr == 0ul)
		return (-1);	/* couldn't alloc wake memory */

#ifdef SMP
	suspcpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &suspcpus);
#endif

	if (acpi_resume_beep != 0)
		timer_spkr_acquire();

	AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));

	intr_suspend();

	if (savectx(susppcbs[0])) {
		fpususpend(suspfpusave[0]);
#ifdef SMP
		if (!CPU_EMPTY(&suspcpus) &&
		    suspend_cpus(suspcpus) == 0) {
			device_printf(sc->acpi_dev, "Failed to suspend APs\n");
			return (0);	/* couldn't sleep */
		}
#endif

		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

		WAKECODE_FIXUP(wakeup_pcb, struct pcb *, susppcbs[0]);
		WAKECODE_FIXUP(wakeup_fpusave, void *, suspfpusave[0]);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t,
		    susppcbs[0]->pcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
		    susppcbs[0]->pcb_gdt.rd_base);
		WAKECODE_FIXUP(wakeup_cpu, int, 0);

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);

		if (status != AE_OK) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			return (0);	/* couldn't sleep */
		}

		for (;;)
			ia32_pause();
	}

	return (1);	/* wakeup successful */
}
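
The WAKECODE_FIXUP() calls patch values (PCB pointer, GDT descriptor, flags) into the real-mode trampoline that the firmware jumps to on wake. As a hedged sketch only, assuming the FreeBSD convention that sc->acpi_wakeaddr points at the kernel mapping of that trampoline and that each symbol name expands to its byte offset inside it, the macro amounts to a typed store at a fixed offset:

/*
 * Sketch of the fixup idiom (the real macro lives in the platform's
 * acpi_wakeup.c and may differ in detail): write "val" with width
 * "type" at byte offset "offset" inside the copied wakeup code.
 */
#define	WAKECODE_FIXUP(offset, type, val)	do {		\
	type	*addr;						\
	addr = (type *)(sc->acpi_wakeaddr + (offset));		\
	*addr = (val);						\
} while (0)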
Example #2
/*
 * Top level routine to direct suspend/resume of a domain.
 */
void
xen_suspend_domain(void)
{
	extern void rtcsync(void);
	extern hrtime_t hres_last_tick;
	mfn_t start_info_mfn;
	ulong_t flags;
	pfn_t pfn;
	int i;

	/*
	 * Check that we are happy to suspend on this hypervisor.
	 */
	if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0) {
		cpr_err(CE_WARN, "Cannot suspend on this hypervisor "
		    "version: v%lu.%lu%s, need at least version v3.0.4 or "
		    "-xvm based hypervisor", XENVER_CURRENT(xv_major),
		    XENVER_CURRENT(xv_minor), XENVER_CURRENT(xv_ver));
		return;
	}

	/*
	 * XXPV - Are we definitely OK to suspend by the time we've connected
	 * the handler?
	 */

	cpr_err(CE_NOTE, "Domain suspending for save/migrate");

	SUSPEND_DEBUG("xen_suspend_domain\n");

	/*
	 * suspend interrupts and devices
	 * XXPV - we use suspend/resume for both save/restore domains (like sun
	 * cpr) and for migration.  Would be nice to know the difference if
	 * possible.  For save/restore where down time may be a long time, we
	 * may want to do more of the things that cpr does.  (e.g. notify user
	 * processes, shrink memory footprint for faster restore, etc.)
	 */
	xen_suspend_devices();
	SUSPEND_DEBUG("xenbus_suspend\n");
	xenbus_suspend();

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)xen_info);
	start_info_mfn = pfn_to_mfn(pfn);

	/*
	 * XXPV: cpu hotplug can hold this under a xenbus watch. Are we safe
	 * wrt xenbus being suspended here?
	 */
	mutex_enter(&cpu_lock);

	/*
	 * Suspend must be done on vcpu 0, as no context for other CPUs is
	 * saved.
	 *
	 * XXPV - add to taskq API ?
	 */
	thread_affinity_set(curthread, 0);
	kpreempt_disable();

	SUSPEND_DEBUG("xen_start_migrate\n");
	xen_start_migrate();
	if (ncpus > 1)
		suspend_cpus();

	/*
	 * We can grab the ec_lock as it's a spinlock with a high SPL. Hence
	 * any holder would have dropped it to get through suspend_cpus().
	 */
	mutex_enter(&ec_lock);

	/*
	 * From here on in, we can't take locks.
	 */
	SUSPEND_DEBUG("ec_suspend\n");
	ec_suspend();
	SUSPEND_DEBUG("gnttab_suspend\n");
	gnttab_suspend();

	flags = intr_clear();

	xpv_time_suspend();

	/*
	 * Currently, the hypervisor incorrectly fails to bring back
	 * powered-down VCPUs.  Thus we need to record any powered-down VCPUs
	 * to prevent any attempts to operate on them.  But we have to do this
	 * *after* the very first time we do ec_suspend().
	 */
	for (i = 1; i < ncpus; i++) {
		if (cpu[i] == NULL)
			continue;

		if (cpu_get_state(cpu[i]) == P_POWEROFF)
			CPUSET_ATOMIC_ADD(cpu_suspend_lost_set, i);
	}

	/*
	 * The dom0 save/migrate code expects these values to be PFNs, but
	 * doesn't translate them automatically, so we do it here.
	 * We don't use mfn_to_pfn() because so many OS services have
	 * been disabled at this point.
	 */
	xen_info->store_mfn = mfn_to_pfn_mapping[xen_info->store_mfn];
	xen_info->console.domU.mfn =
	    mfn_to_pfn_mapping[xen_info->console.domU.mfn];

	if (CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask == 0) {
		prom_printf("xen_suspend_domain(): "
		    "CPU->cpu_m.mcpu_vcpu_info->evtchn_upcall_mask not set\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info,
	    0, UVMF_INVLPG)) {
		prom_printf("xen_suspend_domain(): "
		    "HYPERVISOR_update_va_mapping() failed\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	SUSPEND_DEBUG("HYPERVISOR_suspend\n");

	/*
	 * At this point we suspend and sometime later resume.
	 */
	if (HYPERVISOR_suspend(start_info_mfn)) {
		prom_printf("xen_suspend_domain(): "
		    "HYPERVISOR_suspend() failed\n");
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	/*
	 * Point HYPERVISOR_shared_info to its new value.
	 */
	if (HYPERVISOR_update_va_mapping((uintptr_t)HYPERVISOR_shared_info,
	    xen_info->shared_info | PT_NOCONSIST | PT_VALID | PT_WRITABLE,
	    UVMF_INVLPG))
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);

	if (xen_info->nr_pages != mfn_count) {
		prom_printf("xen_suspend_domain(): number of pages"
		    " changed, was 0x%lx, now 0x%lx\n", mfn_count,
		    xen_info->nr_pages);
		(void) HYPERVISOR_shutdown(SHUTDOWN_crash);
	}

	xpv_time_resume();

	cached_max_mfn = 0;

	SUSPEND_DEBUG("gnttab_resume\n");
	gnttab_resume();

	/* XXPV: add a note that this must be lockless. */
	SUSPEND_DEBUG("ec_resume\n");
	ec_resume();

	intr_restore(flags);

	if (ncpus > 1)
		resume_cpus();

	mutex_exit(&ec_lock);
	xen_end_migrate();
	mutex_exit(&cpu_lock);

	/*
	 * Now we can take locks again.
	 */

	/*
	 * Force the tick value used for tv_nsec in hres_tick() to be up to
	 * date. rtcsync() will reset the hrestime value appropriately.
	 */
	hres_last_tick = xpv_gethrtime();

	/*
	 * XXPV: we need to have resumed the CPUs since this takes locks, but
	 * can remote CPUs see bad state? Presumably yes. Should probably nest
	 * taking of todlock inside of cpu_lock, or vice versa, then provide an
	 * unlocked version.  Probably need to call clkinitf to reset cpu freq
	 * and re-calibrate if we migrated to a different speed cpu.  Also need
	 * to make a (re)init_cpu_info call to update processor info structs
	 * and device tree info.  That remains to be written at the moment.
	 */
	rtcsync();

	rebuild_mfn_list();

	SUSPEND_DEBUG("xenbus_resume\n");
	xenbus_resume();
	SUSPEND_DEBUG("xenbus_resume_devices\n");
	xen_resume_devices();

	thread_affinity_clear(curthread);
	kpreempt_enable();

	SUSPEND_DEBUG("finished xen_suspend_domain\n");

	/*
	 * We have restarted our suspended domain, update the hypervisor
	 * details. NB: This must be done at the end of this function,
	 * since we need the domain to be completely resumed before
	 * these functions will work correctly.
	 */
	xen_set_version(XENVER_CURRENT_IDX);

	/*
	 * We can check and report a warning, but we don't stop the
	 * process.
	 */
	if (xen_hypervisor_supports_solaris(XEN_SUSPEND_CHECK) == 0)
		cmn_err(CE_WARN, "Found hypervisor version: v%lu.%lu%s "
		    "but need at least version v3.0.4",
		    XENVER_CURRENT(xv_major), XENVER_CURRENT(xv_minor),
		    XENVER_CURRENT(xv_ver));

	cmn_err(CE_NOTE, "domain restore/migrate completed");
}
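
SUSPEND_DEBUG() is used throughout as a printf-style trace point. Its real definition lives elsewhere in the Solaris/illumos Xen code and may differ; a minimal stand-in that matches how it is called here, with xen_suspend_debug as an assumed flag name, would be:

/* Hypothetical sketch: trace output gated on a debug flag. */
extern int xen_suspend_debug;		/* assumed tunable name */

#define	SUSPEND_DEBUG(...)				\
	do {						\
		if (xen_suspend_debug)			\
			prom_printf(__VA_ARGS__);	\
	} while (0)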
Example #3
static void
xctrl_suspend(void)
{
#ifdef SMP
	cpuset_t cpu_suspend_map;
#endif
	int suspend_cancelled;

	EVENTHANDLER_INVOKE(power_suspend);

	if (smp_started) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
	}
	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));

	/*
	 * Clear our XenStore node so the toolstack knows we are
	 * responding to the suspend request.
	 */
	xs_write(XST_NIL, "control", "shutdown", "");

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
		return;
	}
	mtx_unlock(&Giant);

#ifdef SMP
	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
	if (smp_started) {
		/*
		 * Suspend other CPUs. This prevents IPIs while we
		 * are resuming, and will allow us to reset per-cpu
		 * vcpu_info on resume.
		 */
		cpu_suspend_map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
		if (!CPU_EMPTY(&cpu_suspend_map))
			suspend_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * Prevent any races with evtchn_interrupt() handler.
	 */
	disable_intr();
	intr_suspend();
	xen_hvm_suspend();

	suspend_cancelled = HYPERVISOR_suspend(0);

	xen_hvm_resume(suspend_cancelled != 0);
	intr_resume(suspend_cancelled != 0);
	enable_intr();

	/*
	 * Reset grant table info.
	 */
	gnttab_resume(NULL);

#ifdef SMP
	/* Send an IPI_BITMAP in case there are pending bitmap IPIs. */
	lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL);
	if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
		/*
		 * Now that event channels have been initialized,
		 * resume CPUs.
		 */
		resume_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or
	 * similar.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

	if (smp_started) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}

	EVENTHANDLER_INVOKE(power_resume);

	if (bootverbose)
		printf("System resumed after suspension\n");

}
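
xctrl_suspend() is not called directly by the rest of the kernel; the Xen control driver watches the domain's control/shutdown XenStore node and dispatches on the string the toolstack writes there, and the xs_write() near the top of the function clears that node to acknowledge the request. A hedged sketch of that dispatch (handler and field names here are illustrative, not necessarily FreeBSD's exact ones) could look like:

/* Illustrative only: map toolstack shutdown reasons to handlers. */
static void xctrl_poweroff(void);
static void xctrl_reboot(void);
static void xctrl_suspend(void);
static void xctrl_halt(void);

struct xctrl_shutdown_reason {
	const char	*reason;	/* string written to control/shutdown */
	void		(*handler)(void);
};

static const struct xctrl_shutdown_reason xctrl_shutdown_reasons[] = {
	{ "poweroff",	xctrl_poweroff },
	{ "reboot",	xctrl_reboot },
	{ "suspend",	xctrl_suspend },
	{ "halt",	xctrl_halt },
};

The watch callback reads the node, matches the value against such a table, and invokes the corresponding handler.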
Example #4
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS	status;
	struct pcb	*pcb;

	if (sc->acpi_wakeaddr == 0ul)
		return (-1);	/* couldn't alloc wake memory */

#ifdef SMP
	suspcpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &suspcpus);
#endif

	if (acpi_resume_beep != 0)
		timer_spkr_acquire();

	AcpiSetFirmwareWakingVector(sc->acpi_wakephys, 0);

	intr_suspend();

	pcb = &susppcbs[0]->sp_pcb;
	if (savectx(pcb)) {
#ifdef __amd64__
		fpususpend(susppcbs[0]->sp_fpususpend);
#elif defined(DEV_NPX)
		npxsuspend(susppcbs[0]->sp_fpususpend);
#endif
#ifdef SMP
		if (!CPU_EMPTY(&suspcpus) && suspend_cpus(suspcpus) == 0) {
			device_printf(sc->acpi_dev, "Failed to suspend APs\n");
			return (0);	/* couldn't sleep */
		}
#endif

		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

#ifndef __amd64__
		WAKECODE_FIXUP(wakeup_cr4, register_t, pcb->pcb_cr4);
#endif
		WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base);

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);
		if (ACPI_FAILURE(status)) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			return (0);	/* couldn't sleep */
		}

		for (;;)
			ia32_pause();
	} else {
#ifdef __amd64__
		fpuresume(susppcbs[0]->sp_fpususpend);
#elif defined(DEV_NPX)
		npxresume(susppcbs[0]->sp_fpususpend);
#endif
	}

	return (1);	/* wakeup successful */
}
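
The if (savectx(pcb)) split is the core idiom of this example: savectx() behaves like setjmp(), returning nonzero on the initial call and appearing to return zero when the saved context is re-entered from the wakeup path (via resumectx() in the trampoline). Sketching just the control flow with the names used above:

	if (savectx(pcb)) {
		/*
		 * Nonzero: direct return, context now saved in pcb.
		 * Patch the wakecode, enter the sleep state, and spin;
		 * this branch is never left by normal control flow.
		 */
	} else {
		/*
		 * Zero: the wakeup code restored pcb via resumectx(),
		 * so execution resumes here after the firmware hands
		 * control back.  Restore FPU state and report success.
		 */
	}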
Example #5
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	struct savefpu	*stopfpu;
#ifdef SMP
	cpumask_t	wakeup_cpus;
#endif
	register_t	cr3, rf;
	ACPI_STATUS	status;
	int		ret;

	ret = -1;

	if (sc->acpi_wakeaddr == 0ul)
		return (ret);

#ifdef SMP
	wakeup_cpus = PCPU_GET(other_cpus);
#endif

	AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc));

	rf = intr_disable();
	intr_suspend();

	/*
	 * Temporarily switch to the kernel pmap because it provides
	 * an identity mapping (set up at boot) for the low physical
	 * memory region containing the wakeup code.
	 */
	cr3 = rcr3();
	load_cr3(KPML4phys);

	stopfpu = &stopxpcbs[0].xpcb_pcb.pcb_save;
	if (acpi_savecpu(&stopxpcbs[0])) {
		fpugetregs(curthread, stopfpu);

#ifdef SMP
		if (wakeup_cpus != 0 && suspend_cpus(wakeup_cpus) == 0) {
			device_printf(sc->acpi_dev,
			    "Failed to suspend APs: CPU mask = 0x%jx\n",
			    (uintmax_t)(wakeup_cpus & ~stopped_cpus));
			goto out;
		}
#endif

		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

		WAKECODE_FIXUP(wakeup_xpcb, struct xpcb *, &stopxpcbs[0]);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t,
		    stopxpcbs[0].xpcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t,
		    stopxpcbs[0].xpcb_gdt.rd_base);
		WAKECODE_FIXUP(wakeup_cpu, int, 0);

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);

		if (status != AE_OK) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			goto out;
		}

		for (;;)
			ia32_pause();
	} else {
Example #6
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS	status;
	struct pcb	*pcb;
#ifdef __amd64__
	struct pcpu *pc;
	int i;
#endif

	if (sc->acpi_wakeaddr == 0ul)
		return (-1);	/* couldn't alloc wake memory */

#ifdef SMP
	suspcpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &suspcpus);
#endif

	if (acpi_resume_beep != 0)
		timer_spkr_acquire();

	AcpiSetFirmwareWakingVector(sc->acpi_wakephys, 0);

	intr_suspend();

	pcb = &susppcbs[0]->sp_pcb;
	if (savectx(pcb)) {
#ifdef __amd64__
		fpususpend(susppcbs[0]->sp_fpususpend);
#else
		npxsuspend(susppcbs[0]->sp_fpususpend);
#endif
#ifdef SMP
		if (!CPU_EMPTY(&suspcpus) && suspend_cpus(suspcpus) == 0) {
			device_printf(sc->acpi_dev, "Failed to suspend APs\n");
			return (0);	/* couldn't sleep */
		}
#endif
#ifdef __amd64__
		hw_ibrs_active = 0;
		hw_ssb_active = 0;
		cpu_stdext_feature3 = 0;
		CPU_FOREACH(i) {
			pc = pcpu_find(i);
			pc->pc_ibpb_set = 0;
		}
#endif

		WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0));
		WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0));

#ifdef __amd64__
		WAKECODE_FIXUP(wakeup_efer, uint64_t, rdmsr(MSR_EFER) &
		    ~(EFER_LMA));
#else
		if ((amd_feature & AMDID_NX) != 0)
			WAKECODE_FIXUP(wakeup_efer, uint64_t, rdmsr(MSR_EFER));
		WAKECODE_FIXUP(wakeup_cr4, register_t, pcb->pcb_cr4);
#endif
		WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb);
		WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit);
		WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base);

#ifdef __i386__
		/*
		 * Map some low memory with virt == phys for ACPI wakecode
		 * to use to jump to high memory after enabling paging. This
		 * is the same as for the similar jump in locore, except the
		 * jump is a single instruction, and we know its address
		 * more precisely so we only need a single PTD, and we have to
		 * be careful to use the kernel map (PTD[0] is for curthread
		 * which may be a user thread in deprecated APIs).
		 */
		pmap_remap_lowptdi(true);
#endif

		/* Call ACPICA to enter the desired sleep state */
		if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
			status = AcpiEnterSleepStateS4bios();
		else
			status = AcpiEnterSleepState(state);
		if (ACPI_FAILURE(status)) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			return (0);	/* couldn't sleep */
		}

		if (acpi_susp_bounce)
			resumectx(pcb);

		for (;;)
			ia32_pause();
	} else {