Example 1
0
/*
 * Notify handler: the firmware told us the _CST object changed, so
 * rebuild this CPU's Cx state list and refresh the global Cx state.
 */
static void
acpi_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    /* This handler must never run in a netisr thread. */
    KASSERT(curthread->td_type != TD_TYPE_NETISR,
        ("notify in netisr%d", mycpuid));

    lwkt_serialize_enter(&acpi_cst_slize);

    /* Rebuild this CPU's Cx state list from the new _CST data. */
    acpi_cst_cx_reprobe_cst(sc);
    acpi_cst_support_list(sc);

    /* Recompute the number of Cx states usable by every CPU. */
    acpi_cst_global_cx_count();

    /*
     * Re-apply the requested lowest Cx state, then clamp it to the
     * freshly computed count.
     */
    if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
    if (acpi_cst_cx_lowest >= acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    lwkt_serialize_exit(&acpi_cst_slize);
}
Example 2
0
/*
 * Notify handler: the firmware told us the _CST object changed, so
 * rebuild this CPU's Cx state list and refresh the global Cx state.
 */
static void
acpi_cst_notify(device_t dev)
{
    struct acpi_cst_softc *sc = device_get_softc(dev);

    /* Must not be running on a cpuhelper thread. */
    cpuhelper_assert(mycpuid, false);

    lwkt_serialize_enter(&acpi_cst_slize);

    /* Rebuild this CPU's Cx state list from the new _CST data. */
    acpi_cst_cx_reprobe_cst(sc);
    acpi_cst_support_list(sc);

    /* Recompute the number of Cx states usable by every CPU. */
    acpi_cst_global_cx_count();

    /*
     * Re-apply the requested lowest Cx state, then clamp it to the
     * freshly computed count.
     */
    if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
    if (acpi_cst_cx_lowest >= acpi_cst_cx_count)
	acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

    lwkt_serialize_exit(&acpi_cst_slize);
}
Example 3
0
/*
 * Call this *after* all CPUs Cx states have been attached.
 *
 * Final initialization pass: applies quirks, probes FADT-based Cx
 * states (or, in _CST mode, trims C3 and installs the _CST notify
 * handler), recomputes the global Cx count, registers the global
 * sysctl knobs, and finally publishes acpi_cst_idle as the system
 * idle hook.  The 'arg' parameter is unused.
 */
static void
acpi_cst_postattach(void *arg)
{
    struct acpi_cst_softc *sc;
    int i;

    /* Get set of Cx state devices */
    devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices,
	&acpi_cst_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs' Cx states.
     */
    acpi_cst_set_quirks();

    if (acpi_cst_use_fadt) {
	/*
	 * We are using Cx mode from FADT, probe for available Cx states
	 * for all processors.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    acpi_cst_cx_probe_fadt(sc);
	}
    } else {
	/*
	 * We are using _CST mode, remove C3 state if necessary.
	 *
	 * As we now know for sure that we will be using _CST mode
	 * install our notify handler.
	 */
	for (i = 0; i < acpi_cst_ndevices; i++) {
	    sc = device_get_softc(acpi_cst_devices[i]);
	    if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
		/* Free part of unused resources */
		acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
		sc->cst_cx_count = sc->cst_non_c3 + 1;
	    }
	    sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
	}
    }
    /* Recompute the Cx state count usable by every CPU. */
    acpi_cst_global_cx_count();

    /* Perform Cx final initialization. */
    for (i = 0; i < acpi_cst_ndevices; i++) {
	sc = device_get_softc(acpi_cst_devices[i]);
	acpi_cst_startup(sc);

	/*
	 * NOTE(review): only one CPU appears to carry the global sysctl
	 * tree (glob_sysctl_tree non-NULL), so these knobs are added once.
	 */
	if (sc->cst_parent->glob_sysctl_tree != NULL) {
	    struct acpi_cpu_softc *cpu = sc->cst_parent;

	    /* Add a sysctl handler to handle global Cx lowest setting */
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
	    		    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
	    SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
	    		    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
	}
    }

    /* Take over idling from cpu_idle_default(). */
    acpi_cst_cx_lowest = 0;
    acpi_cst_cx_lowest_req = 0;
    acpi_cst_disable_idle = FALSE;

    /*
     * Fence so the stores above are globally visible before the idle
     * hook is published and other CPUs start running acpi_cst_idle.
     */
    cpu_sfence();
    cpu_idle_hook = acpi_cst_idle;
}