Example #1
/*
 * Wait for the specified idle threads to switch once.  This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error)
				goto out;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
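
A typical caller of quiesce_cpus() publishes a new global pointer first and
only frees the old object after the quiesce completes, since by then no CPU
can still be running a codepath that dereferences it.  A minimal sketch,
assuming a hypothetical struct handler and handler_replace() (neither is a
FreeBSD API):

/*
 * Hypothetical sketch: retire the old copy of a globally visible
 * structure using quiesce_cpus() as the only synchronization.
 */
static struct handler *handler_ptr;

static int
handler_replace(struct handler *new)
{
	struct handler *old;
	int error;

	old = handler_ptr;
	handler_ptr = new;	/* publish the replacement */
	/* Wait for every CPU's idle thread to switch once. */
	error = quiesce_cpus(all_cpus, "hndlrq", PRI_MIN);
	if (error == 0)
		free(old, M_TEMP);	/* no CPU can reference it now */
	return (error);
}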
Example #2
/*
 * Transition all CPUs to the requested Px-state, honoring the current
 * P-state limit.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	uint64_t msr;
	int error, i, id, j, limit;

	id = pstate;

	/* Get the current P-state limit. */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	if (limit > id)
		id = limit;

	/*
	 * We set the same Px-state on all CPUs.  This should probably
	 * take ACPI _PSD coordination into account.
	 */
	error = 0;
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
		    id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		/* Wait for the transition to complete (up to 100 * 100us). */
		for (j = 0; j < 100; j++) {
			/* Read back the P-state; it need not equal id yet. */
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			if (msr == id)
				break;
			DELAY(100);
		}
		HWPSTATE_DEBUG(dev, "result P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			HWPSTATE_DEBUG(dev, "error: P-state transition timed out\n");
			error = ENXIO;
		}
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	return (error);
}
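
The binding in these loops matters because rdmsr() and wrmsr() operate on
whichever CPU the calling thread happens to be running on; pinning the
thread is what guarantees the MSR access hits the intended core.  The
pattern can be factored into a small helper; the following is a
hypothetical sketch, not an existing FreeBSD KPI:

/*
 * Hypothetical helper: read an MSR on a specific CPU by temporarily
 * binding the current thread there (x86 only).
 */
static uint64_t
rdmsr_on_cpu(int cpu, u_int msr_num)
{
	uint64_t val;

	thread_lock(curthread);
	sched_bind(curthread, cpu);	/* migrate to the target CPU */
	thread_unlock(curthread);

	val = rdmsr(msr_num);		/* now executes on 'cpu' */

	thread_lock(curthread);
	sched_unbind(curthread);	/* allow migration again */
	thread_unlock(curthread);
	return (val);
}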
Example #3
static void
restore_cpu(int oldcpu, int is_bound, struct thread *td)
{

	KASSERT(oldcpu >= 0 && oldcpu < mp_ncpus && cpu_enabled(oldcpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
	thread_lock(td);
	if (is_bound == 0)
		sched_unbind(td);
	else
		sched_bind(td, oldcpu);
	thread_unlock(td);
}
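
restore_cpu() only unbinds when the thread was not already bound on entry,
so callers are expected to record the original binding state before
rebinding.  A sketch of the calling pattern, modeled on how cpuctl(4)
pairs it with sched_bind(); do_on_cpu() itself is illustrative:

static void
do_on_cpu(int cpu, struct thread *td)
{
	int is_bound, oldcpu;

	oldcpu = td->td_oncpu;		/* CPU to rebind to on exit */
	thread_lock(td);
	is_bound = sched_is_bound(td);	/* already pinned? */
	sched_bind(td, cpu);
	thread_unlock(td);

	/* ... perform the per-CPU operation here ... */

	restore_cpu(oldcpu, is_bound, td);
}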
Example #4
int
bman_pool_destroy(t_Handle pool)
{
	struct bman_softc *sc;

	sc = bman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_bpool_cpu[BM_POOL_GetId(pool)]);
	thread_unlock(curthread);

	BM_POOL_Free(pool);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (0);
}
Example #5
t_Error
qman_fqr_free(t_Handle fqr)
{
	struct qman_softc *sc;
	t_Error error;

	sc = qman_sc;
	thread_lock(curthread);
	sched_bind(curthread, sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)]);
	thread_unlock(curthread);

	error = QM_FQR_Free(fqr);

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
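
Examples #4 and #5 share the same bind/call/unbind shape: each portal was
initialized on a particular CPU, and the NetCommSW call must run on that
CPU.  If the pattern recurred further, it could be factored into a wrapper
such as this hypothetical sketch (run_bound() is not an existing KPI):

static t_Error
run_bound(int cpu, t_Error (*func)(t_Handle), t_Handle arg)
{
	t_Error error;

	thread_lock(curthread);
	sched_bind(curthread, cpu);	/* migrate to the portal's CPU */
	thread_unlock(curthread);

	error = func(arg);		/* runs pinned to 'cpu' */

	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	return (error);
}

With that helper, qman_fqr_free() would reduce to a single call:
return (run_bound(sc->sc_fqr_cpu[QM_FQR_GetFqid(fqr)], QM_FQR_Free, fqr));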
Example #6
int
bman_portals_detach(device_t dev)
{
	struct dpaa_portals_softc *sc;
	int i;

	bp_sc = NULL;
	sc = device_get_softc(dev);

	for (i = 0; i < ARRAY_SIZE(sc->sc_dp); i++) {
		if (sc->sc_dp[i].dp_ph != NULL) {
			thread_lock(curthread);
			sched_bind(curthread, i);
			thread_unlock(curthread);

			BM_PORTAL_Free(sc->sc_dp[i].dp_ph);

			thread_lock(curthread);
			sched_unbind(curthread);
			thread_unlock(curthread);
		}

		if (sc->sc_dp[i].dp_ires != NULL) {
			XX_DeallocIntr((int)sc->sc_dp[i].dp_ires);
			bus_release_resource(dev, SYS_RES_IRQ,
			    sc->sc_dp[i].dp_irid, sc->sc_dp[i].dp_ires);
		}
	}
	for (i = 0; i < ARRAY_SIZE(sc->sc_rres); i++) {
		if (sc->sc_rres[i] != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->sc_rrid[i], sc->sc_rres[i]);
	}

	return (0);
}
Example #7
static void
xctrl_suspend(void)
{
#ifdef SMP
	cpuset_t cpu_suspend_map;
#endif
	int suspend_cancelled;

	EVENTHANDLER_INVOKE(power_suspend);

	if (smp_started) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
	}
	KASSERT((PCPU_GET(cpuid) == 0), ("Not running on CPU#0"));

	/*
	 * Clear our XenStore node so the toolstack knows we are
	 * responding to the suspend request.
	 */
	xs_write(XST_NIL, "control", "shutdown", "");

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
		return;
	}
	mtx_unlock(&Giant);

#ifdef SMP
	CPU_ZERO(&cpu_suspend_map);	/* silence gcc */
	if (smp_started) {
		/*
		 * Suspend other CPUs. This prevents IPIs while we
		 * are resuming, and will allow us to reset per-cpu
		 * vcpu_info on resume.
		 */
		cpu_suspend_map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &cpu_suspend_map);
		if (!CPU_EMPTY(&cpu_suspend_map))
			suspend_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * Prevent any races with evtchn_interrupt() handler.
	 */
	disable_intr();
	intr_suspend();
	xen_hvm_suspend();

	suspend_cancelled = HYPERVISOR_suspend(0);

	xen_hvm_resume(suspend_cancelled != 0);
	intr_resume(suspend_cancelled != 0);
	enable_intr();

	/*
	 * Reset grant table info.
	 */
	gnttab_resume(NULL);

#ifdef SMP
	/* Send an IPI_BITMAP in case there are pending bitmap IPIs. */
	lapic_ipi_vectored(IPI_BITMAP_VECTOR, APIC_IPI_DEST_ALL);
	if (smp_started && !CPU_EMPTY(&cpu_suspend_map)) {
		/*
		 * Now that event channels have been initialized,
		 * resume CPUs.
		 */
		resume_cpus(cpu_suspend_map);
	}
#endif

	/*
	 * FreeBSD really needs to add DEVICE_SUSPEND_CANCEL or
	 * similar.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

	if (smp_started) {
		thread_lock(curthread);
		sched_unbind(curthread);
		thread_unlock(curthread);
	}

	EVENTHANDLER_INVOKE(power_resume);

	if (bootverbose)
		printf("System resumed after suspension\n");

}
Example #8
/* Full PV mode suspension. */
static void
xctrl_suspend(void)
{
#ifdef SMP
	struct thread *td;
	cpuset_t map;
	u_int cpuid;
#endif
	int i, j, k, fpp, suspend_cancelled;
	unsigned long max_pfn, start_info_mfn;

	EVENTHANDLER_INVOKE(power_suspend);

#ifdef SMP
	/*
	 * Bind us to CPU 0 and stop any other VCPUs.
	 */
	td = curthread;
	thread_lock(td);
	sched_bind(td, 0);
	thread_unlock(td);
	cpuid = PCPU_GET(cpuid);
	KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));

	map = all_cpus;
	CPU_CLR(cpuid, &map);
	CPU_NAND(&map, &stopped_cpus);
	if (!CPU_EMPTY(&map))
		stop_cpus(map);
#endif

	/*
	 * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
	 * drivers need this.
	 */
	mtx_lock(&Giant);
	if (DEVICE_SUSPEND(root_bus) != 0) {
		mtx_unlock(&Giant);
		printf("%s: device_suspend failed\n", __func__);
#ifdef SMP
		if (!CPU_EMPTY(&map))
			restart_cpus(map);
#endif
		return;
	}
	mtx_unlock(&Giant);

	local_irq_disable();

	xencons_suspend();
	gnttab_suspend();
	intr_suspend();

	max_pfn = HYPERVISOR_shared_info->arch.max_pfn;

	void *shared_info = HYPERVISOR_shared_info;
	HYPERVISOR_shared_info = NULL;
	pmap_kremove((vm_offset_t) shared_info);
	PT_UPDATES_FLUSH();

	xen_start_info->store_mfn = MFNTOPFN(xen_start_info->store_mfn);
	xen_start_info->console.domU.mfn = MFNTOPFN(xen_start_info->console.domU.mfn);

	/*
	 * We'll stop somewhere inside this hypercall. When it returns,
	 * we'll start resuming after the restore.
	 */
	start_info_mfn = VTOMFN(xen_start_info);
	pmap_suspend();
	suspend_cancelled = HYPERVISOR_suspend(start_info_mfn);
	pmap_resume();

	pmap_kenter_ma((vm_offset_t) shared_info, xen_start_info->shared_info);
	HYPERVISOR_shared_info = shared_info;

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
	    VTOMFN(xen_pfn_to_mfn_frame_list_list);

	fpp = PAGE_SIZE / sizeof(unsigned long);
	for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
		if ((j % fpp) == 0) {
			k++;
			xen_pfn_to_mfn_frame_list_list[k] =
			    VTOMFN(xen_pfn_to_mfn_frame_list[k]);
			j = 0;
		}
		xen_pfn_to_mfn_frame_list[k][j] =
		    VTOMFN(&xen_phys_machine[i]);
	}
	HYPERVISOR_shared_info->arch.max_pfn = max_pfn;

	gnttab_resume();
	intr_resume(suspend_cancelled != 0);
	local_irq_enable();
	xencons_resume();

#ifdef SMP
	/* Prepare each VCPU for resume before xenbus is restarted. */
	CPU_FOREACH(i)
		vcpu_prepare(i);
#endif

	/*
	 * Only resume xenbus /after/ we've prepared our VCPUs; otherwise
	 * the VCPU hotplug callback can race with our vcpu_prepare calls.
	 */
	mtx_lock(&Giant);
	DEVICE_RESUME(root_bus);
	mtx_unlock(&Giant);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	if (!CPU_EMPTY(&map))
		restart_cpus(map);
#endif
	EVENTHANDLER_INVOKE(power_resume);
}