/*
 * Restore PCI config registers for any child whose registers were saved
 * by this nexus (flagged via the "nexus-saved-config-regs" property).
 * Children power states are UNKNOWN after a resume, since the PM
 * framework may call resume without an actual power cycle (e.g. when a
 * suspend fails part way through).
 */
void
pcmu_child_cfg_restore(dev_info_t *dip)
{
	dev_info_t *child;

	for (child = ddi_get_child(dip); child != NULL;
	    child = ddi_get_next_sibling(child)) {
		/*
		 * Skip children that have not been init'ed yet; they
		 * will be set up later by pcmu_init_child().
		 */
		if (i_ddi_node_state(child) < DS_INITIALIZED) {
			PCMU_DBG2(PCMU_DBG_DETACH, dip,
			    "DDI_RESUME: skipping %s%d not in CF1\n",
			    ddi_driver_name(child), ddi_get_instance(child));
			continue;
		}

		/*
		 * Only restore config registers that the nexus saved;
		 * anything else is the child driver's responsibility.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
		    "nexus-saved-config-regs") != 1)
			continue;

		(void) pci_restore_config_regs(child);

		PCMU_DBG2(PCMU_DBG_PWR, dip,
		    "DDI_RESUME: nexus restoring %s%d config regs\n",
		    ddi_driver_name(child), ddi_get_instance(child));

		/* The marker property is one-shot: remove it after use. */
		if (ndi_prop_remove(DDI_DEV_T_NONE, child,
		    "nexus-saved-config-regs") != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d can't remove prop %s",
			    ddi_driver_name(child), ddi_get_instance(child),
			    "nexus-saved-config-regs");
		}
	}
}
/*
 * Walk the children of dip and restore PCI config space for every child
 * marked with the "htconfig-saved" property.
 *
 * Returns DDI_FAILURE if any child's restore fails (after attempting
 * all children), DDI_SUCCESS otherwise.
 */
int
npe_restore_htconfig_children(dev_info_t *dip)
{
	int ret = DDI_SUCCESS;
	dev_info_t *child;

	for (child = ddi_get_child(dip); child != NULL;
	    child = ddi_get_next_sibling(child)) {
		/* Only children that saved HT config space are restored. */
		if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
		    "htconfig-saved", 0) == 0)
			continue;

		if (pci_restore_config_regs(child) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed to restore HT config "
			    "regs for %s\n", ddi_node_name(child));
			ret = DDI_FAILURE;
		}
	}

	return (ret);
}
/*
 * Power-management entry point for the PCI-PCI bridge nexus.
 *
 * Translates the requested bus power level (PM_LEVEL_B0..B3) into a PCI
 * device power state (D0..D3hot), writes it to the bridge's PMCSR, and
 * saves/restores the bridge's config registers around transitions into
 * and out of hardware state D3hot.  The whole transition is performed
 * under pwr_mutex.
 *
 * Returns DDI_SUCCESS on a completed transition; DDI_FAILURE when the
 * soft state is missing, the level is below the lowest allowed, the
 * requested B-state is not supported by the bridge, the PM capability
 * register cannot be read, or the config-register save fails.
 *
 * Fixes vs. previous revision:
 *  - the PCI_CAP_EINVAL16 failure path returned while still holding
 *    pwr_mutex (every other failure path drops it), leaking the lock;
 *  - the B3 debug string said "PM_LEVEL_B30".
 */
/*ARGSUSED*/
static int
ppb_pwr(dev_info_t *dip, int component, int lvl)
{
	ppb_devstate_t *ppb;
	uint16_t pmcsr;
	char *str;
	int lowest_lvl;
	int old_lvl;
	int new_lvl;

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(dip));
	if (ppb == NULL) {
		cmn_err(CE_WARN, "%s%d ppb_pwr: can't get soft state",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	DEBUG1(DBG_PWR, dip, "ppb_pwr(): ENTER level = %d\n", lvl);

	mutex_enter(&ppb->ppb_pwr_p->pwr_mutex);

	/*
	 * Find out if the power setting is possible.  If it is not,
	 * set component busy and return failure.  If it is possible,
	 * and it is the lowest pwr setting possible, set component
	 * busy so that the framework does not try to lower any further.
	 */
	lowest_lvl = pci_pwr_new_lvl(ppb->ppb_pwr_p);
	if (lowest_lvl > lvl) {
		pci_pwr_component_busy(ppb->ppb_pwr_p);
		DEBUG2(DBG_PWR, dip, "ppb_pwr: failing power request "
		    "lowest allowed is %d requested is %d\n",
		    lowest_lvl, lvl);
		mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		return (DDI_FAILURE);
	} else if (lowest_lvl == lvl) {
		pci_pwr_component_busy(ppb->ppb_pwr_p);
	} else {
		pci_pwr_component_idle(ppb->ppb_pwr_p);
	}

	if ((pmcsr = PCI_CAP_GET16(ppb->ppb_conf_hdl, NULL,
	    ppb->ppb_pm_cap_ptr, PCI_PMCSR)) == PCI_CAP_EINVAL16) {
		/*
		 * Must drop pwr_mutex before bailing out; the previous
		 * code returned here with the mutex still held.
		 */
		mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * Save the current power level.  This is the actual function level,
	 * not the translated bridge level stored in pwr_p->current_lvl.
	 */
	old_lvl = pmcsr & PCI_PMCSR_STATE_MASK;

	pmcsr &= ~PCI_PMCSR_STATE_MASK;
	switch (lvl) {
	case PM_LEVEL_B0:
		str = "PM_LEVEL_B0 (full speed)";
		pmcsr |= PCI_PMCSR_D0;
		break;

	case PM_LEVEL_B1:
		str = "PM_LEVEL_B1 (light sleep. No bus traffic allowed)";
		if ((ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B1_CAPABLE) == 0) {
			cmn_err(CE_WARN, "%s%d PCI PM state B1 not supported",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
			return (DDI_FAILURE);
		}
		pmcsr |= PCI_PMCSR_D1;
		break;

	case PM_LEVEL_B2:
		str = "PM_LEVEL_B2 (clock off)";
		if ((ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B2_CAPABLE) == 0) {
			cmn_err(CE_WARN, "%s%d PM state B2 not supported...",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
			return (DDI_FAILURE);
		}

		if ((ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) == 0) {
			/*
			 * If B3 isn't supported, use D3 for B2 to avoid the
			 * possible case that D2 for B2 isn't supported.
			 * Saves an extra check and state flag.
			 */
			pmcsr |= PCI_PMCSR_D3HOT;
		} else {
			pmcsr |= PCI_PMCSR_D2;
		}
		break;

	case PM_LEVEL_B3:
		/* Typo fix: was "PM_LEVEL_B30 (clock and power off)". */
		str = "PM_LEVEL_B3 (clock and power off)";
		if ((ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) == 0) {
			cmn_err(CE_WARN, "%s%d PM state B3 not supported...",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
			return (DDI_FAILURE);
		}
		pmcsr |= PCI_PMCSR_D3HOT;
		break;

	default:
		cmn_err(CE_WARN, "%s%d Unknown PM state %d",
		    ddi_driver_name(dip), ddi_get_instance(dip), lvl);
		mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		return (DDI_FAILURE);
	}

	new_lvl = pmcsr & PCI_PMCSR_STATE_MASK;

	/*
	 * Save config regs if going into HW state D3 (B2 or B3).
	 */
	if ((old_lvl != PCI_PMCSR_D3HOT) && (new_lvl == PCI_PMCSR_D3HOT)) {
		DEBUG0(DBG_PWR, dip, "ppb_pwr(): SAVING CONFIG REGS\n");
		if (pci_save_config_regs(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d Save config regs failed",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
			return (DDI_FAILURE);
		}
	}

	PCI_CAP_PUT16(ppb->ppb_conf_hdl, NULL, ppb->ppb_pm_cap_ptr,
	    PCI_PMCSR, pmcsr);

	/*
	 * No bus transactions should occur without waiting for
	 * settle time specified in PCI PM spec rev 2.1 sec 5.6.1.
	 * To make things simple, just use the max time specified for
	 * all state transitions.
	 */
	delay(drv_usectohz(PCI_CLK_SETTLE_TIME));

	/*
	 * Restore configuration registers if coming out of HW state D3.
	 */
	if ((old_lvl == PCI_PMCSR_D3HOT) && (new_lvl != PCI_PMCSR_D3HOT)) {
		DEBUG0(DBG_PWR, dip, "ppb_pwr(): RESTORING CONFIG REGS\n");
		if (pci_restore_config_regs(dip) != DDI_SUCCESS) {
			panic("%s%d restore config regs failed",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		}
		/*NOTREACHED*/
	}

	ppb->ppb_pwr_p->current_lvl = lvl;

	mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);

	DEBUG1(DBG_PWR, dip, "ppb_set_pwr: set PM state to %s\n\n", str);

	return (DDI_SUCCESS);
}