/*
 * Re-evaluate the _CST object when the platform notifies us that it
 * changed, then recompute the lowest usable Cx state.
 *
 * NOTE(review): a second definition of acpi_cst_notify (asserting via
 * cpuhelper_assert instead of KASSERT) appears later in this file; only
 * one of the two can be compiled in — confirm which variant is current.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *softc = device_get_softc(dev);

	/* _CST re-evaluation must not run from a netisr thread. */
	KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("notify in netisr%d", mycpuid));

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Rebuild this CPU's Cx state list from the new _CST data. */
	acpi_cst_cx_reprobe_cst(softc);
	acpi_cst_support_list(softc);

	/* Recompute the lowest Cx state usable across all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Re-apply the user's requested lowest Cx if it is now reachable,
	 * then clamp to the number of states actually available.
	 */
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
	if (acpi_cst_cx_lowest >= acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	lwkt_serialize_exit(&acpi_cst_slize);
}
/*
 * Re-evaluate the _CST object when we are notified that it changed,
 * then fix up the lowest Cx state in use.
 *
 * NOTE(review): this duplicates an earlier definition of acpi_cst_notify
 * (which asserts with KASSERT on td_type); the two cannot coexist in one
 * translation unit — one of them should be removed.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *cst = device_get_softc(dev);
	int lowest;

	/* Must not be running on a cpuhelper thread. */
	cpuhelper_assert(mycpuid, false);

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Refresh the Cx state list from the updated _CST object. */
	acpi_cst_cx_reprobe_cst(cst);
	acpi_cst_support_list(cst);

	/* Update the new lowest usable Cx state for all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Honor the requested lowest Cx when it is within the new state
	 * count, and clamp the result to the last available state.
	 */
	lowest = acpi_cst_cx_lowest;
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		lowest = acpi_cst_cx_lowest_req;
	if (lowest > acpi_cst_cx_count - 1)
		lowest = acpi_cst_cx_count - 1;
	acpi_cst_cx_lowest = lowest;

	lwkt_serialize_exit(&acpi_cst_slize);
}
static void acpi_cst_startup(struct acpi_cst_softc *sc) { struct acpi_cpu_softc *cpu = sc->cst_parent; int i, bm_rld_done = 0; for (i = 0; i < sc->cst_cx_count; ++i) { struct acpi_cst_cx *cx = &sc->cst_cx_states[i]; int error; /* If there are C3(+) states, always enable bus master wakeup */ if (cx->type >= ACPI_STATE_C3 && !bm_rld_done && (acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) { acpi_cst_c3_bm_rld(sc); bm_rld_done = 1; } /* Redo the Cx setup, since quirks have been changed */ error = acpi_cst_cx_setup(cx); if (error) panic("C%d startup setup failed: %d", i + 1, error); } acpi_cst_support_list(sc); SYSCTL_ADD_STRING(&cpu->pcpu_sysctl_ctx, SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), OID_AUTO, "cx_supported", CTLFLAG_RD, sc->cst_cx_supported, 0, "Cx/microsecond values for supported Cx states"); SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, (void *)sc, 0, acpi_cst_lowest_sysctl, "A", "requested lowest Cx sleep state"); SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), OID_AUTO, "cx_lowest_use", CTLTYPE_STRING | CTLFLAG_RD, (void *)sc, 0, acpi_cst_lowest_use_sysctl, "A", "lowest Cx sleep state to use"); SYSCTL_ADD_PROC(&cpu->pcpu_sysctl_ctx, SYSCTL_CHILDREN(cpu->pcpu_sysctl_tree), OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, (void *)sc, 0, acpi_cst_usage_sysctl, "A", "percent usage for each Cx state"); #ifdef notyet /* Signal platform that we can handle _CST notification. */ if (!acpi_cst_use_fadt && acpi_cst_ctrl != 0) { ACPI_LOCK(acpi); AcpiOsWritePort(acpi_cst_smi_cmd, acpi_cst_ctrl, 8); ACPI_UNLOCK(acpi); } #endif }