static int ath_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_softc *sc = hw->priv; u32 val; /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_read_config_dword(pdev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); ath9k_ps_wakeup(sc); /* Enable LED */ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); /* * Reset key cache to sane defaults (all entries cleared) instead of * semi-random values after suspend/resume. */ ath9k_cmn_init_crypto(sc->sc_ah); ath9k_ps_restore(sc); sc->ps_idle = true; ath_radio_disable(sc, hw); return 0; }
static int ath_pci_resume(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath_wiphy *aphy = hw->priv; struct ath_softc *sc = aphy->sc; u32 val; int err; pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) return err; /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_read_config_dword(pdev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); /* Enable LED */ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT); ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); sc->ps_idle = true; ath9k_set_wiphy_idle(aphy, true); ath_radio_disable(sc, hw); return 0; }
/*
 * Indication of hardware PHY state change (e.g. rfkill switch toggled).
 * Records the new state and, when built with Linux STA support, powers
 * the radio up or down accordingly.
 */
void ath_hw_phystate_change(struct ath_softc *sc, int newstate)
{
	/* Ignore redundant notifications. */
	if (sc->sc_hw_phystate == newstate)
		return;

	/* save the hardware switch state */
	sc->sc_hw_phystate = newstate;

#ifdef ATH_SUPPORT_LINUX_STA /* TODO: rfkill support in Linux*/
	if (!newstate) {
		/* Switch went off: quiesce the stack before powering down. */
		sc->sc_ieee_ops->ath_net80211_suspend(sc->sc_ieee);
		ath_radio_disable(sc);
	} else {
		/* Switch went on: power up first, then resume the stack. */
		ath_radio_enable(sc);
		sc->sc_ieee_ops->ath_net80211_resume(sc->sc_ieee);
	}
#endif
}
/*
 * Request that the shared hardware switch to the channel of the given
 * virtual wiphy.  Returns 0 when the change has been (or will be)
 * carried out, or -EBUSY when the caller must retry later (a scan or a
 * previous select is still in progress).  All wiphy bookkeeping is done
 * under sc->wiphy_lock; note the lock is released on every exit path.
 */
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		/*
		 * A previous select is still waiting for wiphys to pause.
		 * Track how long we have been stuck; after ~HZ/2 assume the
		 * pause has wedged and force a radio reset to recover.
		 */
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * tricker radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			/* Drop the lock before the radio reset sleeps/spins. */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc);
			ath_radio_enable(sc);
			queue_work(aphy->sc->hw->workqueue,
				   &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}

	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;

	sc->next_wiphy = aphy;

	/* Ask every other wiphy to pause; if none needed pausing, the
	 * channel change can be requested right away. */
	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		queue_work(aphy->sc->hw->workqueue, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}