static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1251 *wl = hw->priv;

	skb_queue_tail(&wl->tx_queue, skb);

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	ieee80211_queue_work(wl->hw, &wl->tx_work);

	/*
	 * The workqueue is slow to process the tx_queue and we need to stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
		wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
		ieee80211_stop_queues(wl->hw);

		/*
		 * FIXME: this is racy, the variable is not properly
		 * protected. Maybe fix this by removing the stupid
		 * variable altogether and checking the real queue state?
		 */
		wl->tx_queue_stopped = true;
	}

	return NETDEV_TX_OK;
}
Example no. 2
static void brcms_ops_stop(struct ieee80211_hw *hw)
{
	struct brcms_info *wl = hw->priv;
	int status;

	ieee80211_stop_queues(hw);

	if (wl->wlc == NULL)
		return;

	spin_lock_bh(&wl->lock);
	status = brcms_c_chipmatch(wl->wlc->hw->d11core);
	spin_unlock_bh(&wl->lock);
	if (!status) {
		brcms_err(wl->wlc->hw->d11core,
			  "wl: brcms_ops_stop: chipmatch failed\n");
		return;
	}

	bcma_core_pci_power_save(wl->wlc->hw->d11core->bus, false);

	/* put driver in down state */
	spin_lock_bh(&wl->lock);
	brcms_down(wl);
	spin_unlock_bh(&wl->lock);
}
Example no. 3
File: main.c Project: Lyude/linux
static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt76x0_dev *dev = hw->priv;
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->rxfilter |= MT_RX_FILTR_CFG_PROMISC;
		else
			dev->rxfilter &= ~MT_RX_FILTR_CFG_PROMISC;

		mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ieee80211_stop_queues(hw);
		ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
Example no. 4
static void vnt_stop(struct ieee80211_hw *hw)
{
	struct vnt_private *priv = hw->priv;
	int i;

	if (!priv)
		return;

	for (i = 0; i < MAX_KEY_TABLE; i++)
		vnt_mac_disable_keyentry(priv, i);

	/* clear all keys */
	priv->key_entry_inuse = 0;

	if (!test_bit(DEVICE_FLAGS_UNPLUG, &priv->flags))
		vnt_mac_shutdown(priv);

	ieee80211_stop_queues(hw);

	set_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags);

	cancel_delayed_work_sync(&priv->run_command_work);

	priv->cmd_running = false;

	vnt_free_tx_bufs(priv);
	vnt_free_rx_bufs(priv);
	vnt_free_int_bufs(priv);

	usb_kill_urb(priv->interrupt_urb);
	usb_free_urb(priv->interrupt_urb);
}
Example no. 5
static void ar5523_tx(struct ieee80211_hw *hw,
		       struct ieee80211_tx_control *control,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
	struct ar5523_tx_data *data = (struct ar5523_tx_data *)
					txi->driver_data;
	struct ar5523 *ar = hw->priv;
	unsigned long flags;

	ar5523_dbg(ar, "tx called\n");
	if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
		ar5523_dbg(ar, "tx queue full\n");
		ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
			   atomic_read(&ar->tx_nr_total),
			   atomic_read(&ar->tx_nr_pending));
		ieee80211_stop_queues(hw);
	}

	spin_lock_irqsave(&ar->tx_data_list_lock, flags);
	list_add_tail(&data->list, &ar->tx_queue_pending);
	spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);

	ieee80211_queue_work(ar->hw, &ar->tx_work);
}
Example no. 6
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_channel *channel = hw->conf.channel;
	int r;

	ath9k_ps_wakeup(sc);
	cancel_delayed_work_sync(&sc->hw_pll_work);

	spin_lock_bh(&sc->sc_pcu_lock);

	ieee80211_stop_queues(hw);

	/*
	 * Keep the LED on when the radio is disabled
	 * during idle unassociated state.
	 */
	if (!sc->ps_idle) {
		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
	}

	/* Disable interrupts */
	ath9k_hw_disable_interrupts(ah);

	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
Example no. 7
void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return;

	/*
	 * Stop the TX queues in mac80211.
	 */
	ieee80211_stop_queues(rt2x00dev->hw);
	rt2x00queue_stop_queues(rt2x00dev);

	/*
	 * Stop watchdog monitoring.
	 */
	rt2x00link_stop_watchdog(rt2x00dev);

	/*
	 * Disable RX.
	 */
	rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);

	/*
	 * Disable radio.
	 */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF);
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);
	rt2x00led_led_activity(rt2x00dev, false);
	rt2x00leds_led_radio(rt2x00dev, false);
}
Example no. 8
static void brcms_ops_stop(struct ieee80211_hw *hw)
{
	struct brcms_info *wl = hw->priv;
	int status;

	ieee80211_stop_queues(hw);

	if (wl->wlc == NULL)
		return;

	spin_lock_bh(&wl->lock);
	status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
				   wl->wlc->hw->deviceid);
	spin_unlock_bh(&wl->lock);
	if (!status) {
		wiphy_err(wl->wiphy,
			  "wl: brcms_ops_stop: chipmatch failed\n");
		return;
	}

	/* put driver in down state */
	spin_lock_bh(&wl->lock);
	brcms_down(wl);
	spin_unlock_bh(&wl->lock);
}
Example no. 9
static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1251 *wl = hw->priv;
	unsigned long flags;

	skb_queue_tail(&wl->tx_queue, skb);

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	ieee80211_queue_work(wl->hw, &wl->tx_work);

	/*
	 * The workqueue is slow to process the tx_queue and we need to stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
		wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");

		spin_lock_irqsave(&wl->wl_lock, flags);
		ieee80211_stop_queues(wl->hw);
		wl->tx_queue_stopped = true;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}
}
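The version above resolves the FIXME from the first wl1251 example by updating tx_queue_stopped under wl_lock together with the queue stop. For that fix to hold, the wake side has to take the same lock. A minimal sketch of such a counterpart, with a hypothetical helper name and a halved high watermark standing in for whatever low watermark the driver actually defines (this is not the driver's real tx_work):

static void wl1251_tx_queue_wake_locked(struct wl1251 *wl)
{
	unsigned long flags;

	/* Hedged sketch: wake mac80211 once the backlog drains, holding the
	 * same wl_lock as the stop path so tx_queue_stopped and the real
	 * queue state cannot diverge.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (wl->tx_queue_stopped &&
	    skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_HIGH_WATERMARK / 2) {
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}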
Example no. 10
static struct vnt_usb_send_context
	*vnt_get_free_context(struct vnt_private *priv)
{
	struct vnt_usb_send_context *context = NULL;
	int ii;

	dev_dbg(&priv->usb->dev, "%s\n", __func__);

	for (ii = 0; ii < priv->num_tx_context; ii++) {
		if (!priv->tx_context[ii])
			return NULL;

		context = priv->tx_context[ii];
		if (!context->in_use) {
			context->in_use = true;
			memset(context->data, 0,
			       MAX_TOTAL_SIZE_WITH_ALL_HEADERS);

			context->hdr = NULL;

			return context;
		}
	}

	if (ii == priv->num_tx_context) {
		dev_dbg(&priv->usb->dev, "%s No Free Tx Context\n", __func__);

		ieee80211_stop_queues(priv->hw);
	}

	return NULL;
}
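Since vnt_get_free_context() stops the queues when every send context is in use, the release path is what restarts them. A minimal sketch of that counterpart with a hypothetical helper name (the real driver presumably does this from its USB TX completion handling):

static void vnt_release_tx_context(struct vnt_private *priv,
				   struct vnt_usb_send_context *context)
{
	/* Hedged sketch: hand the context back to the pool and restart the
	 * queues that vnt_get_free_context() stopped when the pool ran dry.
	 */
	context->in_use = false;
	ieee80211_wake_queues(priv->hw);
}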
Example no. 11
static int
mt76_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt76_dev *dev = hw->priv;
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		dev->txpower_conf = hw->conf.power_level * 2;

		if (test_bit(MT76_STATE_RUNNING, &dev->state))
			mt76_phy_set_txpower(dev);
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ieee80211_stop_queues(hw);
		ret = mt76_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
Example no. 12
static void wl_ops_stop(struct ieee80211_hw *hw)
{
#ifdef BRCMDBG
	struct wl_info *wl = hw->priv;
	ASSERT(wl);
#endif /*BRCMDBG*/
	ieee80211_stop_queues(hw);
}
Example no. 13
static void rtl_tx_accounting(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);

	if (atomic_inc_return(&rtlhal->tx_pending) >= RTL_MAX_TX_PENDING)
		ieee80211_stop_queues(hw);
}
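The accounting helper above only ever stops the queues; the TX completion path has to decrement the counter and wake them once it falls back below RTL_MAX_TX_PENDING. A minimal sketch of that counterpart with a hypothetical helper name:

static void rtl_tx_complete_accounting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);

	/* Hedged sketch: one frame completed; wake the queues as soon as the
	 * pending count drops back under the limit used by the stop path.
	 */
	if (atomic_dec_return(&rtlhal->tx_pending) < RTL_MAX_TX_PENDING)
		ieee80211_wake_queues(hw);
}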
Example no. 14
int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
		 struct ieee80211_tx_control *control)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
	struct data_ring *ring;
	u16 frame_control;

	/*
	 * Mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 * Note that we can only stop the TX queues inside the TX path
	 * due to possible race conditions in mac80211.
	 */
	if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) {
		ieee80211_stop_queues(hw);
		return 0;
	}

	/*
	 * Determine which ring to put packet on.
	 */
	ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
	if (unlikely(!ring)) {
		ERROR(rt2x00dev,
		      "Attempt to send packet over invalid queue %d.\n"
		      "Please file bug report to %s.\n",
		      control->queue, DRV_PROJECT);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/*
	 * If CTS/RTS is required, and this frame is not CTS or RTS,
	 * create and queue that frame first. But make sure we have
	 * at least enough entries available to send this CTS/RTS
	 * frame as well as the data frame.
	 */
	frame_control = le16_to_cpu(ieee80211hdr->frame_control);
	if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) &&
	    (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS |
			       IEEE80211_TXCTL_USE_CTS_PROTECT))) {
		if (rt2x00_ring_free(ring) <= 1)
			return NETDEV_TX_BUSY;

		if (rt2x00mac_tx_rts_cts(rt2x00dev, ring, skb, control))
			return NETDEV_TX_BUSY;
	}

	if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control))
		return NETDEV_TX_BUSY;

	if (rt2x00dev->ops->lib->kick_tx_queue)
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);

	return NETDEV_TX_OK;
}
Example no. 15
void ath9k_htc_reset(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *channel = priv->hw->conf.chandef.chan;
	struct ath9k_hw_cal_data *caldata = NULL;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;
	int ret;

	mutex_lock(&priv->mutex);
	ath9k_htc_ps_wakeup(priv);

	ath9k_htc_stop_ani(priv);
	ieee80211_stop_queues(priv->hw);

	del_timer_sync(&priv->tx.cleanup_timer);
	ath9k_htc_tx_drain(priv);

	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
	WMI_CMD(WMI_STOP_RECV_CMDID);

	ath9k_wmi_event_drain(priv);

	caldata = &priv->caldata;
	ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset device (%u Mhz) reset status %d\n",
			channel->center_freq, ret);
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	WMI_CMD(WMI_START_RECV_CMDID);
	ath9k_host_rx_init(priv);

	mode = ath9k_htc_get_curmode(priv, ah->curchan);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);

	WMI_CMD(WMI_ENABLE_INTR_CMDID);
	htc_start(priv->htc);
	ath9k_htc_vif_reconfig(priv);
	ieee80211_wake_queues(priv->hw);

	mod_timer(&priv->tx.cleanup_timer,
		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));

	ath9k_htc_ps_restore(priv);
	mutex_unlock(&priv->mutex);
}
Example no. 16
void ath9k_htc_reset(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *channel = priv->hw->conf.channel;
	struct ath9k_hw_cal_data *caldata = NULL;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;
	int ret;

	mutex_lock(&priv->mutex);
	ath9k_htc_ps_wakeup(priv);

	if (priv->op_flags & OP_ASSOCIATED)
		cancel_delayed_work_sync(&priv->ath9k_ani_work);

	ieee80211_stop_queues(priv->hw);
	htc_stop(priv->htc);
	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
	WMI_CMD(WMI_STOP_RECV_CMDID);

	caldata = &priv->caldata;
	ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset device (%u Mhz) reset status %d\n",
			channel->center_freq, ret);
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	WMI_CMD(WMI_START_RECV_CMDID);
	ath9k_host_rx_init(priv);

	mode = ath9k_htc_get_curmode(priv, ah->curchan);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);

	WMI_CMD(WMI_ENABLE_INTR_CMDID);
	htc_start(priv->htc);

	if (priv->op_flags & OP_ASSOCIATED) {
		ath9k_htc_beacon_config(priv, priv->vif);
		ath_start_ani(priv);
	}

	ieee80211_wake_queues(priv->hw);

	ath9k_htc_ps_restore(priv);
	mutex_unlock(&priv->mutex);
}
Example no. 17
static void ath10k_core_restart(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k, restart_work);

	set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);

	/* Place a barrier to make sure the compiler doesn't reorder
	 * CRASH_FLUSH and calling other functions.
	 */
	barrier();

	ieee80211_stop_queues(ar->hw);
	ath10k_drain_tx(ar);
	complete_all(&ar->scan.started);
	complete_all(&ar->scan.completed);
	complete_all(&ar->scan.on_channel);
	complete_all(&ar->offchan_tx_completed);
	complete_all(&ar->install_key_done);
	complete_all(&ar->vdev_setup_done);
	complete_all(&ar->thermal.wmi_sync);
	wake_up(&ar->htt.empty_tx_wq);
	wake_up(&ar->wmi.tx_credits_wq);
	wake_up(&ar->peer_mapping_wq);

	mutex_lock(&ar->conf_mutex);

	switch (ar->state) {
	case ATH10K_STATE_ON:
		ar->state = ATH10K_STATE_RESTARTING;
		ath10k_hif_stop(ar);
		ath10k_scan_finish(ar);
		ieee80211_restart_hw(ar->hw);
		break;
	case ATH10K_STATE_OFF:
		/* this can happen if driver is being unloaded
		 * or if the crash happens during FW probing */
		ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
		break;
	case ATH10K_STATE_RESTARTING:
		/* hw restart might be requested from multiple places */
		break;
	case ATH10K_STATE_RESTARTED:
		ar->state = ATH10K_STATE_WEDGED;
		/* fall through */
	case ATH10K_STATE_WEDGED:
		ath10k_warn(ar, "device is wedged, will not restart\n");
		break;
	case ATH10K_STATE_UTF:
		ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
		break;
	}

	mutex_unlock(&ar->conf_mutex);
}
Example no. 18
static int agnx_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	AGNX_TRACE;

	ieee80211_stop_queues(dev);
	agnx_stop(dev);

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
Example no. 19
File: tx99.c Project: 020gzh/linux
static int ath9k_tx99_init(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_tx_control txctl;
	int r;

	if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
		ath_err(common,
			"driver is in invalid state unable to use TX99");
		return -EINVAL;
	}

	sc->tx99_skb = ath9k_build_tx99_skb(sc);
	if (!sc->tx99_skb)
		return -ENOMEM;

	memset(&txctl, 0, sizeof(txctl));
	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];

	ath_reset(sc, NULL);

	ath9k_ps_wakeup(sc);

	ath9k_hw_disable_interrupts(ah);
	atomic_set(&ah->intr_ref_cnt, -1);
	ath_drain_all_txq(sc);
	ath_stoprecv(sc);

	sc->tx99_state = true;

	ieee80211_stop_queues(hw);

	if (sc->tx99_power == MAX_RATE_POWER + 1)
		sc->tx99_power = MAX_RATE_POWER;

	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
	if (r) {
		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
		return r;
	}

	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
		sc->tx99_power,
		sc->tx99_power / 2);

	/* We leave the hardware awake as it will be chugging on */

	return 0;
}
Example no. 20
static void tx_inc_submitted_urbs(struct zd_usb *usb)
{
	struct zd_usb_tx *tx = &usb->tx;
	unsigned long flags;

	spin_lock_irqsave(&tx->lock, flags);
	++tx->submitted_urbs;
	if (!tx->stopped && tx->submitted_urbs > ZD_USB_TX_HIGH) {
		ieee80211_stop_queues(zd_usb_to_hw(usb));
		tx->stopped = 1;
	}
	spin_unlock_irqrestore(&tx->lock, flags);
}
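The increment side above pairs with a decrement on URB completion, which wakes the queues once the count falls back to a low watermark. A sketch of that expected counterpart; the low-watermark constant name is assumed here:

static void tx_dec_submitted_urbs(struct zd_usb *usb)
{
	struct zd_usb_tx *tx = &usb->tx;
	unsigned long flags;

	/* Hedged sketch: mirror of tx_inc_submitted_urbs(); ZD_USB_TX_LOW is
	 * assumed as the wake threshold.
	 */
	spin_lock_irqsave(&tx->lock, flags);
	--tx->submitted_urbs;
	if (tx->stopped && tx->submitted_urbs <= ZD_USB_TX_LOW) {
		ieee80211_wake_queues(zd_usb_to_hw(usb));
		tx->stopped = 0;
	}
	spin_unlock_irqrestore(&tx->lock, flags);
}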
Example no. 21
/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}
Example no. 22
static void vnt_tx_80211(struct ieee80211_hw *hw,
	struct ieee80211_tx_control *control, struct sk_buff *skb)
{
	struct vnt_private *priv = hw->priv;

	ieee80211_stop_queues(hw);

	if (vnt_tx_packet(priv, skb)) {
		ieee80211_free_txskb(hw, skb);

		ieee80211_wake_queues(hw);
	}
}
Example no. 23
void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int ret;
	u8 cmd_rsp;

	ath9k_htc_ps_wakeup(priv);

	/* Disable LED */
	ath9k_hw_set_gpio(ah, ah->led_pin, 1);
	ath9k_hw_cfg_gpio_input(ah, ah->led_pin);

	WMI_CMD(WMI_DISABLE_INTR_CMDID);

	/* Stop TX */
	ieee80211_stop_queues(hw);
	ath9k_htc_tx_drain(priv);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);

	/* Stop RX */
	WMI_CMD(WMI_STOP_RECV_CMDID);

	/* Clear the WMI event queue */
	ath9k_wmi_event_drain(priv);

	/*
	 * The MIB counters have to be disabled here,
	 * since the target doesn't do it.
	 */
	ath9k_hw_disable_mib_counters(ah);

	if (!ah->curchan)
		ah->curchan = ath9k_cmn_get_curchannel(hw, ah);

	/* Reset the HW */
	ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			ret, ah->curchan->channel);
	}

	/* Disable the PHY */
	ath9k_hw_phy_disable(ah);

	ath9k_htc_ps_restore(priv);
	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
}
Example no. 24
static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv = dev->priv;

	if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
		ieee80211_stop_queues(dev);
		p54p_stop(dev);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
Example no. 25
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (iwlagn_txfifo_flush(priv, 0)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}
Example no. 26
static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}
Example no. 27
void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;
	unsigned int i = 0;

	ieee80211_stop_queues(hw);

	/*
	 * Run over all queues to kick them, this will force
	 * any pending frames to be transmitted.
	 */
	tx_queue_for_each(rt2x00dev, queue) {
		rt2x00dev->ops->lib->kick_tx_queue(queue);
	}
Example no. 28
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	struct sk_buff *skb;
	bool woken_up = false;
	int ret;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	while ((skb = skb_dequeue(&wl->tx_queue))) {
		if (!woken_up) {
			ret = wl1271_ps_elp_wakeup(wl, false);
			if (ret < 0)
				goto out;
			woken_up = true;
		}

		ret = wl1271_tx_frame(wl, skb);
		if (ret == -EBUSY) {
			/* firmware buffer is full, stop queues */
			wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
				     "stop queues");
			ieee80211_stop_queues(wl->hw);
			wl->tx_queue_stopped = true;
			skb_queue_head(&wl->tx_queue, skb);
			goto out;
		} else if (ret < 0) {
			dev_kfree_skb(skb);
			goto out;
		} else if (wl->tx_queue_stopped) {
			/* firmware buffer has space, restart queues */
			wl1271_debug(DEBUG_TX,
				     "complete_packet: waking queues");
			ieee80211_wake_queues(wl->hw);
			wl->tx_queue_stopped = false;
		}
	}

out:
	if (woken_up)
		wl1271_ps_elp_sleep(wl);

	mutex_unlock(&wl->mutex);
}
Example no. 29
static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt7601u_dev *dev = hw->priv;
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ieee80211_stop_queues(hw);
		ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
Example no. 30
static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
			   bool stop)
{
	if (stop) {
		IWL_DEBUG_POWER(priv, "Stop all queues\n");
		if (priv->mac80211_registered)
			ieee80211_stop_queues(priv->hw);
		IWL_DEBUG_POWER(priv,
				"Schedule 5 seconds CT_KILL Timer\n");
		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
			  jiffies + CT_KILL_EXIT_DURATION * HZ);
	} else {
		IWL_DEBUG_POWER(priv, "Wake all queues\n");
		if (priv->mac80211_registered)
			ieee80211_wake_queues(priv->hw);
	}
}
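The 5-second timer armed in the CT_KILL branch needs a callback that eventually calls this helper with stop = false to wake the queues again. A minimal sketch under the old timer API of this driver era, with a hypothetical callback name (the driver's real exit path also re-checks the temperature via firmware before waking):

static void iwl_tt_ct_kill_exit_timer(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	/* Hedged sketch: fires CT_KILL_EXIT_DURATION seconds after the
	 * queues were stopped; hand control back to the helper above so it
	 * wakes them.
	 */
	iwl_perform_ct_kill_task(priv, false);
}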