/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	cpupm_free_domains(&cpupm_pstate_domains);
	cpu_acpi_free_pstate_data(handle);

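	/* Tear down turbo-mode support if it was initialized. */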
	if (mach_state->ms_turbo != NULL)
		cpupm_turbo_fini(mach_state->ms_turbo);
	mach_state->ms_turbo = NULL;
}
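/*
 * For context, pwrnow_fini() above mirrors pwrnow_init(). A minimal
 * sketch of the allocation side follows; it is an assumption inferred
 * from the frees above, not the real init path, which additionally
 * validates the ACPI _PCT data and probes for turbo support. The
 * helpers cpu_acpi_cache_pstate_data() and cpupm_alloc_domains() are
 * assumed counterparts of the free routines used in pwrnow_fini().
 */
static int
pwrnow_init_sketch(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

	/* Cache the ACPI P-state data later freed by pwrnow_fini(). */
	if (cpu_acpi_cache_pstate_data(handle) != 0)
		return (-1);	/* no power management available */

	/* Build the P-state dependency domains freed by pwrnow_fini(). */
	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	return (0);
}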
/*
 * Free resources allocated by cpu_idle_init().
 */
static void
cpu_idle_fini(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	uint_t	cpu_max_cstates, i;

	/*
	 * Point the idle routine and the dispatcher enqueue hook
	 * back at their generic, non-deep-idle versions.
	 */
	idle_cpu = cp->cpu_m.mcpu_idle_cpu = non_deep_idle_cpu;
	disp_enq_thread = non_deep_idle_disp_enq_thread;

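	/* Delete any C-state kstats created by cpu_idle_init(). */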
	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);
	if (cstate) {
		cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

		for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
			if (cstate->cs_ksp != NULL)
				kstat_delete(cstate->cs_ksp);
			cstate++;
		}
	}

	cpupm_free_ms_cstate(cp);
	cpupm_free_domains(&cpupm_cstate_domains);
	cpu_acpi_free_cstate_data(handle);

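	/* Unregister the deep-idle and CPR (suspend/resume) callbacks. */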
	mutex_enter(&cpu_idle_callb_mutex);
	if (cpu_deep_idle_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_deep_idle_callb_id);
		cpu_deep_idle_callb_id = (callb_id_t)0;
	}
	if (cpu_idle_cpr_callb_id != (callb_id_t)0) {
		(void) callb_delete(cpu_idle_cpr_callb_id);
		cpu_idle_cpr_callb_id = (callb_id_t)0;
	}
	mutex_exit(&cpu_idle_callb_mutex);
}
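/*
 * For context, the callb_delete() calls in cpu_idle_fini() unregister
 * callbacks set up on the init side with callb_add(). A minimal sketch
 * of that registration follows; the handler names cpu_deep_idle_callb
 * and cpu_idle_cpr_callb are assumptions inferred from the callback
 * IDs above, and the real init code may register them elsewhere.
 */
static boolean_t cpu_deep_idle_callb(void *arg, int code);
static boolean_t cpu_idle_cpr_callb(void *arg, int code);

static void
cpu_idle_callb_register_sketch(void)
{
	mutex_enter(&cpu_idle_callb_mutex);
	if (cpu_deep_idle_callb_id == (callb_id_t)0)
		cpu_deep_idle_callb_id = callb_add(cpu_deep_idle_callb,
		    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
	if (cpu_idle_cpr_callb_id == (callb_id_t)0)
		cpu_idle_cpr_callb_id = callb_add(cpu_idle_cpr_callb,
		    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
	mutex_exit(&cpu_idle_callb_mutex);
}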