/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *sc = device_get_softc(dev);

	KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("notify in netisr%d", mycpuid));

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Update the list of Cx states. */
	acpi_cst_cx_reprobe_cst(sc);
	acpi_cst_support_list(sc);

	/* Update the new lowest useable Cx state for all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Fix up the lowest Cx being used
	 */
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	lwkt_serialize_exit(&acpi_cst_slize);
}
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *sc = device_get_softc(dev);

	cpuhelper_assert(mycpuid, false);

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Update the list of Cx states. */
	acpi_cst_cx_reprobe_cst(sc);
	acpi_cst_support_list(sc);

	/* Update the new lowest useable Cx state for all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Fix up the lowest Cx being used
	 */
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	lwkt_serialize_exit(&acpi_cst_slize);
}
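/*
 * A minimal userland sketch of the fix-up policy above, assuming
 * hypothetical stand-ins for acpi_cst_cx_lowest, acpi_cst_cx_lowest_req
 * and acpi_cst_cx_count; it is not part of the driver, only an
 * illustration of how the requested lowest Cx index is reconciled with
 * however many Cx states survived the _CST re-probe.
 */
#include <stdio.h>

/* Hypothetical helper mirroring the fix-up logic in acpi_cst_notify(). */
static int
clamp_cx_lowest(int cx_lowest, int cx_lowest_req, int cx_count)
{
	/* Honor the requested state if it is available again. */
	if (cx_lowest_req < cx_count)
		cx_lowest = cx_lowest_req;
	/* Never point past the deepest state the new _CST provides. */
	if (cx_lowest > cx_count - 1)
		cx_lowest = cx_count - 1;
	return (cx_lowest);
}

int
main(void)
{
	/* Firmware drops to 2 Cx states while index 2 is requested... */
	printf("%d\n", clamp_cx_lowest(2, 2, 2));	/* ...clamped to 1 */
	/* Firmware restores 3 states: the original request is honored. */
	printf("%d\n", clamp_cx_lowest(1, 2, 3));	/* back to 2 */
	return (0);
}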