Example #1
int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		      bool set)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return 0;

	ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
						   rt2x00mac_set_tim_iter,
						   rt2x00dev);

	/* queue work to update the beacon template */
	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
	return 0;
}
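Every example in this collection follows the same deferral pattern: code running in atomic context (an IRQ handler, a timer, a mac80211 callback) hands an embedded struct work_struct to ieee80211_queue_work(), and the sleeping part runs later on mac80211's workqueue. A minimal sketch of that skeleton; the my_dev/my_intf_work names are hypothetical, and headers are omitted as in the excerpts themselves:

struct my_dev {
	struct ieee80211_hw *hw;
	struct work_struct intf_work;
};

/* runs later in workqueue (process) context, so it may sleep */
static void my_intf_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, intf_work);

	/* ... slow or sleeping operations, e.g. rebuilding a beacon ... */
}

static void my_init(struct my_dev *dev)
{
	INIT_WORK(&dev->intf_work, my_intf_work);
}

/* callable from atomic context: defers instead of doing the work */
static void my_event(struct my_dev *dev)
{
	ieee80211_queue_work(dev->hw, &dev->intf_work);
}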
Example #2
static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	struct p54s_priv *priv = dev->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data;
	struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data;
	unsigned long flags;

	BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data)));

	spin_lock_irqsave(&priv->tx_lock, flags);
	list_add_tail(&di->tx_list, &priv->tx_pending);
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	ieee80211_queue_work(priv->hw, &priv->work);
}
Example #3
/*
 * Checks if the BB/MAC is hung.
 */
void ath_hw_check(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;
	int busy;
	u8 is_alive, nbeacon = 1;

	ath9k_ps_wakeup(sc);
	is_alive = ath9k_hw_check_alive(sc->sc_ah);

	if (is_alive && !AR_SREV_9300(sc->sc_ah))
		goto out;
	else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
		ath_dbg(common, RESET,
			"DCU stuck is detected. Schedule chip reset\n");
		RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
		goto sched_reset;
	}

	spin_lock_irqsave(&common->cc_lock, flags);
	busy = ath_update_survey_stats(sc);
	spin_unlock_irqrestore(&common->cc_lock, flags);

	ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
		busy, sc->hw_busy_count + 1);
	if (busy >= 99) {
		if (++sc->hw_busy_count >= 3) {
			RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
			goto sched_reset;
		}
	} else if (busy >= 0) {
		sc->hw_busy_count = 0;
		nbeacon = 3;
	}

	ath_start_rx_poll(sc, nbeacon);
	goto out;

sched_reset:
	ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
out:
	ath9k_ps_restore(sc);
}
Example #4
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
                                     const u8 *ra, u16 tid)
{
    struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
    struct ieee80211_local *local = sdata->local;
    struct ieee80211_ra_tid *ra_tid;
    struct sk_buff *skb = dev_alloc_skb(0);

    if (unlikely(!skb))
        return;

    ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
    memcpy(&ra_tid->ra, ra, ETH_ALEN);
    ra_tid->tid = tid;

    skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
    skb_queue_tail(&sdata->skb_queue, skb);
    ieee80211_queue_work(&local->hw, &sdata->work);
}
Example #5
/*
 * PLL-WAR for AR9485/AR9340
 */
static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
{
	static int count;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (pll_sqsum >= 0x40000) {
		count++;
		if (count == 3) {
			ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
			RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
			ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
			count = 0;
			return true;
		}
	} else {
		count = 0;
	}

	return false;
}
Example #6
static irqreturn_t wl1271_irq(int irq, void *cookie)
{
	struct wl1271 *wl = cookie;
	unsigned long flags;

	wl1271_debug(DEBUG_IRQ, "IRQ");

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->irq_work);
	set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return IRQ_HANDLED;
}
Example #7
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
Example #8
void wl1271_scan_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, scan_complete_work);

	wl1271_debug(DEBUG_SCAN, "Scanning complete");

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, false);

	/* restore hardware connection monitoring template */
	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
		if (wl1271_ps_elp_wakeup(wl) == 0) {
			wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
			wl1271_ps_elp_sleep(wl);
		}
	}

	if (wl->scan.failed) {
		wl1271_info("Scan completed due to error.");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}

out:
	mutex_unlock(&wl->mutex);
}
Example #9
/*
 * TX polling - checks if the TX engine is stuck somewhere
 * and issues a chip reset if so.
 */
void ath_tx_complete_poll_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    tx_complete_work.work);
	struct ath_txq *txq;
	int i;
	bool needreset = false;
#ifdef CONFIG_ATH9K_DEBUGFS
	sc->tx_complete_poll_work_seen++;
#endif

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			ath_txq_lock(sc, txq);
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = true;
					ath_txq_unlock(sc, txq);
					break;
				} else {
					txq->axq_tx_inprogress = true;
				}
			}
			ath_txq_unlock_complete(sc, txq);
		}

	if (needreset) {
		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
			"tx hung, resetting the chip\n");
		RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
		ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
		return;
	}

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}
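The excerpt above shows only the poll handler; the arming side lives elsewhere in ath9k. A minimal sketch of how such a self-rescheduling delayed work is typically set up, reusing the names from the example (the wrapper function itself is hypothetical):

static void ath_tx_poll_start(struct ath_softc *sc)
{
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
}

After this single kick, the handler keeps itself alive by re-queuing at the end of every run, and drops out of the cycle only when it escalates to a chip reset.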
Example #10
int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= STA_TID_NUM)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = sta->ampdu_mlme.tid_tx[tid];

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
Example #11
void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_WARNING "%s: Not enough memory, "
			       "dropping stop BA session", sdata->name);
#endif
		return;
	}
	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
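Examples #4 and #11 relay the (ra, tid) pair through the cb area of a zero-size skb so the BA teardown can continue in process context. A deliberately simplified sketch of the consuming side of such a queue; the real consumer is mac80211's interface work function, which dispatches many more pkt_type values than shown here:

static void example_sdata_work(struct ieee80211_sub_if_data *sdata)
{
	struct sk_buff *skb;
	struct ieee80211_ra_tid *ra_tid;

	while ((skb = skb_dequeue(&sdata->skb_queue)) != NULL) {
		if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
			ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
			/* finish the teardown in process context */
			ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
						ra_tid->tid);
		}
		dev_kfree_skb(skb);
	}
}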
Example #12
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* free struct pending for start, if present */
	tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
	kfree(tid_tx);
	sta->ampdu_mlme.tid_start_tx[tid] = NULL;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending add Block Ack request we activated a timer until
 * add Block Ack response will arrive from the recipient.
 * If this timer expires sta_addba_resp_timer_expired will be executed.
 */
static void sta_addba_resp_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx_timer =
		from_timer(tid_tx_timer, t, addba_resp_timer);
	struct sta_info *sta = tid_tx_timer->sta;
	u8 tid = tid_tx_timer->tid;
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d not expecting addBA response\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}

void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, params.ssn,
				     IEEE80211_MAX_AMPDU_BUF,
				     tid_tx->timeout);
}

/*
 * After accepting the AddBA Response we activated a timer,
 * resetting it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
	struct tid_ampdu_tx *tid_tx_timer =
		from_timer(tid_tx_timer, t, session_timer);
	struct sta_info *sta = tid_tx_timer->sta;
	u8 tid = tid_tx_timer->tid;
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
}

int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack session with HT STAs. This information
	 * is set when we receive a bss info from a probe response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;
	tid_tx->sta = sta;
	tid_tx->tid = tid;

	/* response timer */
	timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);

	/* tx timer */
	timer_setup(&tid_tx->session_timer,
		    sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);

static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
			      struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		return;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);
}

static struct tid_ampdu_tx *
ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
			const u8 *ra, u16 tid, struct sta_info **sta)
{
	struct tid_ampdu_tx *tid_tx;

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return NULL;
	}

	*sta = sta_info_get_bss(sdata, ra);
	if (!*sta) {
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return NULL;
	}

	tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);

	if (WARN_ON(!tid_tx))
		ht_dbg(sdata, "addBA was not requested!\n");

	return tid_tx;
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}

int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_stop_tx_ba_session(pubsta, tid);

	if (!local->ops->ampdu_action)
		return -EINVAL;

	if (tid >= IEEE80211_NUM_TIDS)
		return -EINVAL;

	spin_lock_bh(&sta->lock);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (!tid_tx) {
		ret = -ENOENT;
		goto unlock;
	}

	WARN(sta->reserved_tid == tid,
	     "Requested to stop BA session on reserved tid=%d", tid);

	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		/* already in progress stopping it */
		ret = 0;
		goto unlock;
	}

	set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

 unlock:
	spin_unlock_bh(&sta->lock);
	return ret;
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);

void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
			     struct tid_ampdu_tx *tid_tx)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	bool send_delba = false;

	ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);

	if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sdata,
		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
		       sta->sta.addr, tid);
		goto unlock_sta;
	}

	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR && tid_tx->tx_stop)
		send_delba = true;

	ieee80211_remove_tid_tx(sta, tid);

 unlock_sta:
	spin_unlock_bh(&sta->lock);

	if (send_delba)
		ieee80211_send_delba(sdata, sta->sta.addr, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
}

void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				     const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_stop_tx_ba_cb(sdata, ra, tid);

	rcu_read_lock();
	tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
	if (!tid_tx)
		goto out;

	set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);


void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_txq *txq;
	u16 capab, tid;
	u8 buf_size;
	bool amsdu;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK;
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes);

	txq = sta->sta.txq[tid];
	if (!amsdu && txq)
		set_bit(IEEE80211_TXQ_NO_AMSDU, &to_txq_info(txq)->flags);

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx)
		goto out;

	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
		       sta->sta.addr, tid);
		goto out;
	}

	del_timer_sync(&tid_tx->addba_resp_timer);

	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
	       sta->sta.addr, tid);

	/*
	 * addba_resp_timer may have fired before we got here, and
	 * caused WANT_STOP to be set. If the stop then was already
	 * processed further, STOPPING might be set.
	 */
	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		ht_dbg(sta->sdata,
		       "got addBA resp for %pM tid %d but we already gave up\n",
		       sta->sta.addr, tid);
		goto out;
	}

	/*
	 * IEEE 802.11-2007 7.3.1.14:
	 * In an ADDBA Response frame, when the Status Code field
	 * is set to 0, the Buffer Size subfield is set to a value
	 * of at least 1.
	 */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS && buf_size) {
		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
				     &tid_tx->state)) {
			/* ignore duplicate response */
			goto out;
		}

		tid_tx->buf_size = buf_size;
		tid_tx->amsdu = amsdu;

		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
			ieee80211_agg_tx_operational(local, sta, tid);

		sta->ampdu_mlme.addba_req_num[tid] = 0;

		if (tid_tx->timeout) {
			mod_timer(&tid_tx->session_timer,
				  TU_TO_EXP_TIME(tid_tx->timeout));
			tid_tx->last_tx = jiffies;
		}

	} else {
		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
	}

 out:
	mutex_unlock(&sta->ampdu_mlme.mtx);
}
Example #13
/*
 *  ANI performs periodic noise floor calibration
 *  that is used to adjust and optimize the chip performance.  This
 *  takes environmental changes (location, temperature) into account.
 *  When the task is complete, it reschedules itself depending on the
 *  appropriate interval that was calculated.
 */
void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval, long_cal_interval;
	unsigned long flags;

	if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
		long_cal_interval = ATH_LONG_CALINTERVAL_INT;
	else
		long_cal_interval = ATH_LONG_CALINTERVAL;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
		if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
			spin_lock_irqsave(&sc->sc_pm_lock, flags);
			sc->ps_flags |= PS_WAIT_FOR_ANI;
			spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
		}
		goto set_timer;
	}
	ah->ani_skip_count = 0;
	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	sc->ps_flags &= ~PS_WAIT_FOR_ANI;
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	ath9k_ps_wakeup(sc);

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
		longcal = true;
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
			shortcal = true;
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - common->ani.checkani_timer) >= ah->config.ani_poll_interval) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Call ANI routine if necessary */
	if (aniflag) {
		spin_lock_irqsave(&common->cc_lock, flags);
		ath9k_hw_ani_monitor(ah, ah->curchan);
		ath_update_survey_stats(sc);
		spin_unlock_irqrestore(&common->cc_lock, flags);
	}

	/* Perform calibration if necessary */
	if (longcal || shortcal) {
		common->ani.caldone =
			ath9k_hw_calibrate(ah, ah->curchan,
					   ah->rxchainmask, longcal);
	}

	ath_dbg(common, ANI,
		"Calibration @%lu finished: %s %s %s, caldone: %s\n",
		jiffies,
		longcal ? "long" : "", shortcal ? "short" : "",
		aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");

	ath9k_ps_restore(sc);

set_timer:
	/*
	* Set timer interval based on previous results.
	* The interval must be the shortest necessary to satisfy ANI,
	* short calibration and long calibration.
	*/
	cal_interval = ATH_LONG_CALINTERVAL;
	cal_interval = min(cal_interval, (u32)ah->config.ani_poll_interval);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));

	if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
		if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
			ieee80211_queue_work(sc->hw, &sc->paprd_work);
		} else if (!ah->paprd_table_write_done) {
			ath9k_ps_wakeup(sc);
			ath_paprd_activate(sc);
			ath9k_ps_restore(sc);
		}
	}
}
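ath_ani_calibrate() above still uses the pre-4.15 timer convention where the callback receives an unsigned long data argument. A minimal sketch of the same arm-and-reschedule shape under the modern timer_setup()/from_timer() API (struct and function names here are hypothetical):

struct my_softc {
	struct timer_list ani_timer;
};

static void my_ani_timer_fn(struct timer_list *t)
{
	struct my_softc *sc = from_timer(sc, t, ani_timer);

	/* ... calibration work ... */

	/* re-arm, just as ath_ani_calibrate() does via mod_timer() */
	mod_timer(&sc->ani_timer, jiffies + msecs_to_jiffies(100));
}

static void my_ani_init(struct my_softc *sc)
{
	timer_setup(&sc->ani_timer, my_ani_timer_fn, 0);
	mod_timer(&sc->ani_timer, jiffies + msecs_to_jiffies(100));
}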
Example #14
void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 status = sc->intrstatus;
	u32 rxmask;

	if ((status & ATH9K_INT_FATAL) ||
	    (status & ATH9K_INT_BB_WATCHDOG)) {
		spin_lock(&sc->sc_pcu_lock);
		ath_reset(sc, true);
		spin_unlock(&sc->sc_pcu_lock);
		return;
	}

	ath9k_ps_wakeup(sc);
	spin_lock(&sc->sc_pcu_lock);

	/*
	 * Only run the baseband hang check if beacons stop working in AP or
	 * IBSS mode, because it has a high false positive rate. For station
	 * mode it should not be necessary, since the upper layers will detect
	 * this through a beacon miss automatically and the following channel
	 * change will trigger a hardware reset anyway
	 */
	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
	    !ath9k_hw_check_alive(ah))
		ieee80211_queue_work(sc->hw, &sc->hw_check_work);

	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
		/*
		 * TSF sync does not look correct; remain awake to sync with
		 * the next Beacon.
		 */
		ath_dbg(common, ATH_DBG_PS,
			"TSFOOR - Sync with next Beacon\n");
		sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC |
				PS_TSFOOR_SYNC;
	}

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
			  ATH9K_INT_RXORN);
	else
		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);

	if (status & rxmask) {
		/* Check for high priority Rx first */
		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    (status & ATH9K_INT_RXHP))
			ath_rx_tasklet(sc, 0, true);

		ath_rx_tasklet(sc, 0, false);
	}

	if (status & ATH9K_INT_TX) {
		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
			ath_tx_edma_tasklet(sc);
		else
			ath_tx_tasklet(sc);
	}

	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		if (status & ATH9K_INT_GENTIMER)
			ath_gen_timer_isr(sc->sc_ah);

	/* re-enable hardware interrupt */
	ath9k_hw_enable_interrupts(ah);

	spin_unlock(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
Example #15
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN_ON(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= STA_TID_NUM) ||
	    !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
		return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	/*
	 * The aggregation code is not prepared to handle
	 * anything but STA/AP due to the BSSID handling.
	 * IBSS could work in the code but isn't supported
	 * by drivers or the standard.
	 */
	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP)
		return -EINVAL;

	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA sessions blocked. "
		       "Denying BA session request\n");
#endif
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = sta->ampdu_mlme.tid_tx[tid];
	/* check if the TID is not in aggregation flow already */
	if (tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - session is not "
				 "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		if (net_ratelimit())
			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
					tid);
#endif
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/* Tx timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/* finally, assign it to the array */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
Example #16
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake; /* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1, err_out2;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error once.
			 */
			if (!err_out2)
				b43dbg(dev->wl,
				       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
				       ring->index, firstused, slot);
			err_out2 = 1;
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CPTCFG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* CPTCFG_B43_DEBUG */
			ieee80211_tx_status(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
Example #17
/*
 *  This routine performs the periodic noise floor calibration function
 *  that is used to adjust and optimize the chip performance.  This
 *  takes environmental changes (location, temperature) into account.
 *  When the task is complete, it reschedules itself depending on the
 *  appropriate interval that was calculated.
 */
void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval, long_cal_interval;
	unsigned long flags;

	if (ah->caldata && ah->caldata->nfcal_interference)
		long_cal_interval = ATH_LONG_CALINTERVAL_INT;
	else
		long_cal_interval = ATH_LONG_CALINTERVAL;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	ath9k_ps_wakeup(sc);

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
		longcal = true;
		ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
			shortcal = true;
			ath_dbg(common, ATH_DBG_ANI,
				"shortcal @%lu\n", jiffies);
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - common->ani.checkani_timer) >=
	     ah->config.ani_poll_interval) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Call ANI routine if necessary */
	if (aniflag) {
		spin_lock_irqsave(&common->cc_lock, flags);
		ath9k_hw_ani_monitor(ah, ah->curchan);
		ath_update_survey_stats(sc);
		spin_unlock_irqrestore(&common->cc_lock, flags);
	}

	/* Perform calibration if necessary */
	if (longcal || shortcal) {
		common->ani.caldone =
			ath9k_hw_calibrate(ah, ah->curchan,
						common->rx_chainmask, longcal);
	}

	ath9k_ps_restore(sc);

set_timer:
	/*
	* Set timer interval based on previous results.
	* The interval must be the shortest necessary to satisfy ANI,
	* short calibration and long calibration.
	*/
	cal_interval = ATH_LONG_CALINTERVAL;
	if (sc->sc_ah->config.enable_ani)
		cal_interval = min(cal_interval,
				   (u32)ah->config.ani_poll_interval);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
	if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
		if (!ah->caldata->paprd_done)
			ieee80211_queue_work(sc->hw, &sc->paprd_work);
		else if (!ah->paprd_table_write_done)
			ath_paprd_activate(sc);
	}
}
Example #18
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
                                  u16 timeout)
{
    struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
    struct ieee80211_sub_if_data *sdata = sta->sdata;
    struct ieee80211_local *local = sdata->local;
    struct tid_ampdu_tx *tid_tx;
    int ret = 0;

    trace_api_start_tx_ba_session(pubsta, tid);

    if (WARN_ON(!local->ops->ampdu_action))
        return -EINVAL;

    if ((tid >= STA_TID_NUM) ||
            !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
            (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
        return -EINVAL;

#ifdef CONFIG_MAC80211_HT_DEBUG
    printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
           pubsta->addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

    if (sdata->vif.type != NL80211_IFTYPE_STATION &&
            sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
            sdata->vif.type != NL80211_IFTYPE_AP &&
            sdata->vif.type != NL80211_IFTYPE_ADHOC)
        return -EINVAL;

    if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA sessions blocked. "
               "Denying BA session request\n");
#endif
        return -EINVAL;
    }

    /*
     * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
     * member of an IBSS, and has no other existing Block Ack agreement
     * with the recipient STA, then the initiating STA shall transmit a
     * Probe Request frame to the recipient STA and shall not transmit an
     * ADDBA Request frame unless it receives a Probe Response frame
     * from the recipient within dot11ADDBAFailureTimeout.
     *
     * The probe request mechanism for ADDBA is currently not implemented,
     * but we only build up Block Ack session with HT STAs. This information
     * is set when we receive a bss info from a probe response or a beacon.
     */
    if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
            !sta->sta.ht_cap.ht_supported) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
               "does not advertise HT support\n", pubsta->addr);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        return -EINVAL;
    }

    spin_lock_bh(&sta->lock);

    /* we have tried too many times, receiver does not want A-MPDU */
    if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
        ret = -EBUSY;
        goto err_unlock_sta;
    }

    /*
     * if we have tried more than HT_AGG_BURST_RETRIES times we
     * will spread our requests in time to avoid stalling connection
     * for too long
     */
    if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
            time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
                        HT_AGG_RETRIES_PERIOD)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - "
               "waiting a grace period after %d failed requests "
               "on tid %u\n",
               sta->ampdu_mlme.addba_req_num[tid], tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        ret = -EBUSY;
        goto err_unlock_sta;
    }

    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
    /* check if the TID is not in aggregation flow already */
    if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
#ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "BA request denied - session is not "
               "idle on tid %u\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
        ret = -EAGAIN;
        goto err_unlock_sta;
    }

    /* prepare A-MPDU MLME for Tx aggregation */
    tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
    if (!tid_tx) {
        ret = -ENOMEM;
        goto err_unlock_sta;
    }

    skb_queue_head_init(&tid_tx->pending);
    __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

    tid_tx->timeout = timeout;

    /* response timer */
    tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
    tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
    init_timer(&tid_tx->addba_resp_timer);

    /* tx timer */
    tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
    tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
    init_timer_deferrable(&tid_tx->session_timer);

    /* assign a dialog token */
    sta->ampdu_mlme.dialog_token_allocator++;
    tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

    /*
     * Finally, assign it to the start array; the work item will
     * collect it and move it to the normal array.
     */
    sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

    ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

    /* this flow continues off the work */
err_unlock_sta:
    spin_unlock_bh(&sta->lock);
    return ret;
}
Example #19
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	u32 start_time = jiffies;
	bool pending = false;

	/* we might try to wakeup although we didn't go to sleep (e.g. on boot) */
	if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
		return 0;

	/* don't cancel_sync as it might contend for a mutex and deadlock */
	cancel_delayed_work(&wl->elp_work);

	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * The spinlock is required here to synchronize both the work and
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);

	if (!pending) {
		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_error("ELP wakeup timeout!");
			ieee80211_queue_work(wl->hw, &wl->recovery_work);
			ret = -ETIMEDOUT;
			goto err;
		} else if (ret < 0) {
			wl1271_error("ELP wakeup completion error.");
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));
	goto out;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return ret;

out:
	return 0;
}
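Examples #6 and #19 are the two halves of one handshake: the waker publishes a completion pointer under a spinlock and waits with a timeout, while the IRQ handler completes and clears it under the same lock. A stripped-down sketch of that shape with hypothetical names; the real wl1271 code additionally juggles the ELP/IRQ flag bits:

struct my_wl {
	spinlock_t lock;
	struct completion *wake_compl;
};

static int my_wait_for_wakeup_irq(struct my_wl *wl)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);
	wl->wake_compl = &compl;	/* publish before poking the hardware */
	spin_unlock_irqrestore(&wl->lock, flags);

	/* ... trigger the wakeup in hardware here ... */

	if (!wait_for_completion_timeout(&compl, msecs_to_jiffies(500))) {
		spin_lock_irqsave(&wl->lock, flags);
		wl->wake_compl = NULL;	/* IRQ must not touch it anymore */
		spin_unlock_irqrestore(&wl->lock, flags);
		return -ETIMEDOUT;
	}
	return 0;
}

static irqreturn_t my_irq(int irq, void *cookie)
{
	struct my_wl *wl = cookie;
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);
	if (wl->wake_compl) {
		complete(wl->wake_compl);
		wl->wake_compl = NULL;
	}
	spin_unlock_irqrestore(&wl->lock, flags);
	return IRQ_HANDLED;
}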
Example #20
void ath_rx_poll(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;

	ieee80211_queue_work(sc->hw, &sc->hw_check_work);
}
Example #21
void ath9k_beacon_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf = NULL;
	struct ieee80211_vif *vif;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int slot;

	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
		ath_dbg(common, RESET,
			"reset work is pending, skip beaconing now\n");
		return;
	}

	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
		sc->beacon.bmisscnt++;

		if (!ath9k_hw_check_alive(ah))
			ieee80211_queue_work(sc->hw, &sc->hw_check_work);

		if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
			ath_dbg(common, BSTUCK,
				"missed %u consecutive beacons\n",
				sc->beacon.bmisscnt);
			ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
			if (sc->beacon.bmisscnt > 3)
				ath9k_hw_bstuck_nfcal(ah);
		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
			ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
			sc->beacon.bmisscnt = 0;
			ath9k_queue_reset(sc, RESET_TYPE_BEACON_STUCK);
		}

		return;
	}

	slot = ath9k_beacon_choose_slot(sc);
	vif = sc->beacon.bslot[slot];

	if (!vif || !vif->bss_conf.enable_beacon)
		return;

	bf = ath9k_beacon_generate(sc->hw, vif);
	WARN_ON(!bf);

	if (sc->beacon.bmisscnt != 0) {
		ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
			sc->beacon.bmisscnt);
		sc->beacon.bmisscnt = 0;
	}

	/*
	 * Handle slot time change when a non-ERP station joins/leaves
	 * an 11g network.  The 802.11 layer notifies us via callback,
	 * we mark updateslot, then wait one beacon before effecting
	 * the change.  This gives associated stations at least one
	 * beacon interval to note the state change.
	 *
	 * NB: The slot time change state machine is clocked according
	 *     to whether we are bursting or staggering beacons.  We
	 *     recognize the request to update and record the current
	 *     slot then don't transition until that slot is reached
	 *     again.  If we miss a beacon for that slot then we'll be
	 *     slow to transition but we'll be sure at least one beacon
	 *     interval has passed.  When bursting slot is always left
	 *     set to ATH_BCBUF so this check is a noop.
	 */
	if (sc->beacon.updateslot == UPDATE) {
		sc->beacon.updateslot = COMMIT;
		sc->beacon.slotupdate = slot;
	} else if (sc->beacon.updateslot == COMMIT &&
		   sc->beacon.slotupdate == slot) {
		ah->slottime = sc->beacon.slottime;
		ath9k_hw_init_global_settings(ah);
		sc->beacon.updateslot = OK;
	}

	if (bf) {
		ath9k_reset_beacon_status(sc);

		ath_dbg(common, BEACON,
			"Transmitting beacon for slot: %d\n", slot);

		/* NB: cabq traffic should already be queued and primed */
		ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);

		if (!edma)
			ath9k_hw_txstart(ah, sc->beacon.beaconq);
	}
}
Example #22
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) {
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * tricker radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
				   &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}
Example #23
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}
Example #24
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	u32 buf_size;
	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	bool had_data = false;
	bool unaligned = false;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		if (wl->chip.id != CHIP_ID_1283_PG20) {
			/*
			 * Choose the block we want to read
			 * For aggregated packets, only the first memory block
			 * should be retrieved. The FW takes care of the rest.
			 */
			mem_block = wl1271_rx_get_mem_block(status,
							    drv_rx_counter);

			wl->rx_mem_pool_addr.addr = (mem_block << 8) +
			   le32_to_cpu(wl_mem_map->packet_memory_pool_start);

			wl->rx_mem_pool_addr.addr_extra =
				wl->rx_mem_pool_addr.addr + 4;

			wl1271_write(wl, WL1271_SLV_REG_DATA,
				     &wl->rx_mem_pool_addr,
				     sizeof(wl->rx_mem_pool_addr), false);
		}

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl1271_rx_get_buf_size(status,
					drv_rx_counter);

			unaligned = wl1271_rx_get_unaligned(status,
					drv_rx_counter);

			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will just
			 * be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_length, unaligned) == 1)
				had_data = true;

			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions
	 */
	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);

	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
		u32 timeout = wl->conf.rx_streaming.duration;

		/* restart rx streaming */
		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
			ieee80211_queue_work(wl->hw,
					     &wl->rx_streaming_enable_work);

		mod_timer(&wl->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}