/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 *
 * Returns PWRNOW_RET_SUCCESS on success.  On any failure the
 * module-private state is torn down via pwrnow_fini() and
 * PWRNOW_RET_NO_PM is returned so the caller disables P-state
 * power management on this CPU.
 */
static int
pwrnow_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_pct_t *pct_stat;
	/* Emit the BIOS-parse warning only once system-wide, not per CPU. */
	static int logged = 0;

	PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

	/*
	 * Cache the P-state specific ACPI data.
	 */
	if (cpu_acpi_cache_pstate_data(handle) != 0) {
		if (!logged) {
			cmn_err(CE_NOTE, "!PowerNow! support is being "
			    "disabled due to errors parsing ACPI P-state "
			    "objects exported by BIOS.");
			logged = 1;
		}
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	/*
	 * Only the fixed-hardware (FFH) _PCT address space is supported;
	 * anything else (e.g. system I/O) cannot be driven by this module.
	 */
	pct_stat = CPU_ACPI_PCT_STATUS(handle);
	switch (pct_stat->cr_addrspace_id) {
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
		break;
	default:
		cmn_err(CE_WARN, "!_PCT configured for unsupported "
		    "addrspace = %d.", pct_stat->cr_addrspace_id);
		cmn_err(CE_NOTE, "!CPU power management will not function.");
		pwrnow_fini(cp);
		return (PWRNOW_RET_NO_PM);
	}

	cpupm_alloc_domains(cp, CPUPM_P_STATES);

	/*
	 * Check for Core Performance Boost support
	 */
	if (pwrnow_cpb_supported())
		mach_state->ms_turbo = cpupm_turbo_init(cp);

	/*
	 * Fix: terminate the statement with ';' like every other
	 * PWRNOW_DEBUG call here — the original relied on the macro
	 * expansion to supply (or not need) the terminator.
	 */
	PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id));

	return (PWRNOW_RET_SUCCESS);
}
/*
 * Validate that this processor supports deep cstate and if so,
 * get the c-state data from ACPI and cache it.
 *
 * On success returns 0 with per-C-state kstats installed, C-state
 * domains allocated, and (if deep C-states are supported) the deep-idle
 * and CPR callbacks registered.  On ACPI parse failure tears down via
 * cpu_idle_fini() and returns -1.
 */
static int
cpu_idle_init(cpu_t *cp)
{
	cpupm_mach_state_t *mach_state =
	    (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
	cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
	cpu_acpi_cstate_t *cstate;
	char name[KSTAT_STRLEN];
	int cpu_max_cstates, i;
	int ret;

	/*
	 * Cache the C-state specific ACPI data.
	 * A negative return is treated as a hard parse error worth a
	 * console note; any nonzero return disables deep idle for this CPU.
	 */
	if ((ret = cpu_acpi_cache_cstate_data(handle)) != 0) {
		if (ret < 0)
			cmn_err(CE_NOTE,
			    "!Support for CPU deep idle states is being "
			    "disabled due to errors parsing ACPI C-state "
			    "objects exported by BIOS.");
		cpu_idle_fini(cp);
		return (-1);
	}

	cstate = (cpu_acpi_cstate_t *)CPU_ACPI_CSTATES(handle);

	cpu_max_cstates = cpu_acpi_get_max_cstates(handle);

	/*
	 * Create one "cstate" kstat per supported C-state, named after
	 * the state's ACPI type (e.g. "c1", "c2", ...).
	 */
	for (i = CPU_ACPI_C1; i <= cpu_max_cstates; i++) {
		/*
		 * NOTE(review): KSTAT_STRLEN - 1 leaves one byte unused;
		 * snprintf's size argument already accounts for the NUL.
		 * Harmless, but sizeof (name) would be the usual form.
		 */
		(void) snprintf(name, KSTAT_STRLEN - 1, "c%d", cstate->cs_type);
		/*
		 * Allocate, initialize and install cstate kstat
		 */
		cstate->cs_ksp = kstat_create("cstate", CPU->cpu_id, name,
		    "misc", KSTAT_TYPE_NAMED,
		    sizeof (cpu_idle_kstat) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL);

		if (cstate->cs_ksp == NULL) {
			cmn_err(CE_NOTE, "kstat_create(c_state) fail");
		} else {
			/* Shared template data; serialized by cpu_idle_mutex. */
			cstate->cs_ksp->ks_data = &cpu_idle_kstat;
			cstate->cs_ksp->ks_lock = &cpu_idle_mutex;
			cstate->cs_ksp->ks_update = cpu_idle_kstat_update;
			cstate->cs_ksp->ks_data_size += MAXNAMELEN;
			cstate->cs_ksp->ks_private = cstate;
			kstat_install(cstate->cs_ksp);
			/*
			 * NOTE(review): cstate only advances on success, so a
			 * failed kstat_create retries the SAME cstate entry on
			 * the next iteration while i still advances — verify
			 * this is intentional and not a skipped-entry bug.
			 */
			cstate++;
		}
	}

	cpupm_alloc_domains(cp, CPUPM_C_STATES);
	cpupm_alloc_ms_cstate(cp);

	if (cpu_deep_cstates_supported()) {
		uint32_t value;

		/*
		 * Register the deep-idle and CPR (suspend/resume) callbacks
		 * exactly once system-wide; the id checks and registration
		 * are serialized by cpu_idle_callb_mutex.
		 */
		mutex_enter(&cpu_idle_callb_mutex);
		if (cpu_deep_idle_callb_id == (callb_id_t)0)
			cpu_deep_idle_callb_id = callb_add(&cpu_deep_idle_callb,
			    (void *)NULL, CB_CL_CPU_DEEP_IDLE, "cpu_deep_idle");
		if (cpu_idle_cpr_callb_id == (callb_id_t)0)
			cpu_idle_cpr_callb_id = callb_add(&cpu_idle_cpr_callb,
			    (void *)NULL, CB_CL_CPR_PM, "cpu_idle_cpr");
		mutex_exit(&cpu_idle_callb_mutex);

		/*
		 * All supported CPUs (Nehalem and later) will remain in C3
		 * during Bus Master activity.
		 * All CPUs set ACPI_BITREG_BUS_MASTER_RLD to 0 here if it
		 * is not already 0 before enabling Deeper C-states.
		 */
		cpu_acpi_get_register(ACPI_BITREG_BUS_MASTER_RLD, &value);
		if (value & 1)
			cpu_acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	return (0);
}