Example #1
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	int sta_id;
	int ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
				     tid, ssn);

	return ret;
}
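This helper is not invoked directly by mac80211; it is reached through the driver's ampdu_action callback. A minimal sketch of that dispatch, assuming the usual hw->priv mapping (the wrapper name is hypothetical):

static int iwlagn_ampdu_tx_start_sketch(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					u16 tid, u16 *ssn)
{
	/* assumed: the driver keeps its iwl_priv in hw->priv */
	struct iwl_priv *priv = hw->priv;

	return iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
}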
Example #2
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	unsigned long flags;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
			       buf_size, ssn);

	/*
	 * If the limit is 0, it wasn't initialised yet; use the
	 * default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (cfg(priv)->ht_params &&
	    cfg(priv)->ht_params->use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */

		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		 sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
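The two clamping steps above keep the per-station aggregation limit at or below the driver default and let it only shrink across sessions. A small sketch of that invariant in isolation (function name is hypothetical; LINK_QUAL_AGG_FRAME_LIMIT_DEF is assumed to be the driver's 63-frame default):

static u16 agg_limit_sketch(u16 stored_limit, u8 session_buf_size)
{
	/* 0 means "not initialised yet": fall back to the default */
	u16 limit = stored_limit ? stored_limit
				 : LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/* never go above the default, for hardware reasons */
	session_buf_size = min_t(int, session_buf_size,
				 LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	/* the per-station limit can only shrink across sessions */
	return min_t(u16, limit, session_buf_size);
}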
Example #3
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;
	int ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	*ssn = tid_data->agg.ssn;

	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
	if (ret) {
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return ret;
	}

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return ret;
}
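When this function leaves the state at IWL_EMPTYING_HW_QUEUE_ADDBA, something must later notice that the queue has drained and complete the handshake. A hedged sketch of that deferred path (the helper name is an assumption; it would run under sta_lock in the reclaim flow):

static void iwlagn_check_addba_ready_sketch(struct iwl_priv *priv,
					    struct ieee80211_vif *vif,
					    struct ieee80211_sta *sta,
					    u16 tid)
{
	struct iwl_tid_data *tid_data =
		&priv->tid_data[iwl_sta_id(sta)][tid];

	/* the queue caught up with the ADDBA start sequence number */
	if (tid_data->agg.state == IWL_EMPTYING_HW_QUEUE_ADDBA &&
	    tid_data->agg.ssn == tid_data->next_reclaimed) {
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	}
}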
Example #4
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta, u16 tid,
				    u16 *ssn, u8 buf_size)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

	if (!(mvm->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	mutex_lock(&mvm->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
		break;
	case IEEE80211_AMPDU_TX_START:
		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&mvm->mutex);

	return ret;
}
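A handler like this is registered through the driver's ieee80211_ops table, which mac80211 consults for every A-MPDU state change. A sketch of that wiring (the ops table is abbreviated; all other callbacks are omitted):

static const struct ieee80211_ops iwl_mvm_hw_ops_sketch = {
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	/* ... the remaining mac80211 callbacks ... */
};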
Example #5
static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
				      struct iwl_lq_sta *lq_data, u8 tid,
				      struct ieee80211_sta *sta)
{
	int ret = -EAGAIN;
	u32 load;

	/*
	 * Don't create TX aggregation sessions when in high
	 * BT traffic, as they would just be disrupted by BT.
	 */
	if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
		IWL_DEBUG_COEX(priv,
			       "BT traffic (%d), no aggregation allowed\n",
			       priv->bt_traffic_load);
		return ret;
	}

	load = rs_tl_get_load(lq_data, tid);

	if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
		IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
				sta->addr, tid);
		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
		if (ret == -EAGAIN) {
			/*
			 * The driver and mac80211 are out of sync;
			 * this might be caused by reloading the firmware.
			 * Stop the TX BA session here.
			 */
			IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
				tid);
			ieee80211_stop_tx_ba_session(sta, tid);
		}
	} else {
		IWL_DEBUG_HT(priv, "Aggregation not enabled for tid %d "
			"because load = %u\n", tid, load);
	}
	return ret;
}
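Callers are expected to validate the TID before reaching this helper. A hedged sketch of such a guarded wrapper, mirroring the IWL_MAX_TID_COUNT check used in the other examples (the wrapper name is an assumption):

static void rs_tl_turn_on_agg_sketch(struct iwl_priv *priv, u8 tid,
				     struct iwl_lq_sta *lq_data,
				     struct ieee80211_sta *sta)
{
	if (tid < IWL_MAX_TID_COUNT)
		rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
	else
		IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
			tid, IWL_MAX_TID_COUNT);
}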
Example #6
void iwl_mvm_reorder_timer_expired(unsigned long data)
{
	struct iwl_mvm_reorder_buffer *buf = (void *)data;
	int i;
	u16 sn = 0, index = 0;
	bool expired = false;

	spin_lock_bh(&buf->lock);

	if (!buf->num_stored || buf->removed) {
		spin_unlock_bh(&buf->lock);
		return;
	}

	for (i = 0; i < buf->buf_size ; i++) {
		index = (buf->head_sn + i) % buf->buf_size;

		if (!skb_peek_tail(&buf->entries[index]))
			continue;
		if (!time_after(jiffies, buf->reorder_time[index] +
				RX_REORDER_BUF_TIMEOUT_MQ))
			break;
		expired = true;
		sn = ieee80211_sn_add(buf->head_sn, i + 1);
	}

	if (expired) {
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
		/* SN is set to the last expired frame + 1 */
		IWL_DEBUG_HT(buf->mvm,
			     "Releasing expired frames for sta %u, sn %d\n",
			     buf->sta_id, sn);
		iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
		rcu_read_unlock();
	} else if (buf->num_stored) {
		/*
		 * If no frame expired and there are stored frames, index now
		 * points to the first unexpired frame - re-arm the timer
		 * for that frame.
		 */
		mod_timer(&buf->reorder_timer,
			  buf->reorder_time[index] +
			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
	}
	spin_unlock_bh(&buf->lock);
}
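The expiry handler above only works if the RX path records an arrival time and arms the timer whenever it buffers a frame. A hedged sketch of that arming side, using the same fields (the function name and call site are assumptions):

static void iwl_mvm_reorder_arm_timer_sketch(struct iwl_mvm_reorder_buffer *buf,
					     int index)
{
	/* called with buf->lock held, right after queueing the skb */
	buf->reorder_time[index] = jiffies;
	if (!timer_pending(&buf->reorder_timer))
		mod_timer(&buf->reorder_timer,
			  jiffies + RX_REORDER_BUF_TIMEOUT_MQ);
}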
Example #7
/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	unsigned long flags;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= hw_params(priv).max_txq_num) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->shrd->tid_data[sta_id][tid].agg;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (unlikely(agg->txq_id != scd_flow)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can happen very often, and in order not to
		 * fill the syslog, don't enable this logging by default.
		 */
		IWL_DEBUG_TX_REPLY(priv,
			"BA scd_flow %d does not match txq_id %d\n",
			scd_flow, agg->txq_id);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d\n",
			   ba_resp->tid, ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed = txed_2_done,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}
	IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
			ba_resp->txed, ba_resp->txed_2_done);

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
			  0, &reclaimed_skbs);
	freed = 0;
	while (!skb_queue_empty(&reclaimed_skbs)) {

		skb = __skb_dequeue(&reclaimed_skbs);
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}

		ieee80211_tx_status_irqsafe(priv->hw, skb);
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
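The first reclaimed skb carries the A-MPDU counters that rate scaling consumes. A small sketch of how those fields read back on the mac80211 side (the function name is hypothetical):

static void iwlagn_ba_stats_sketch(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (info->flags & IEEE80211_TX_STAT_AMPDU)
		pr_debug("A-MPDU: %d of %d frames acked\n",
			 info->status.ampdu_ack_len,
			 info->status.ampdu_len);
}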
Example #8
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 priv->tid_data[sta_id][tid].agg.state);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		priv->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
			    tid_data->agg.ssn);
turn_off:
	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* keep IRQs disabled while swapping sta_lock for shrd->lock;
	 * flags are restored when shrd->lock is released below */
	spin_unlock(&priv->shrd->sta_lock);
	spin_lock(&priv->shrd->lock);

	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
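Symmetrically to the ADDBA case, a stop request that leaves the state at IWL_EMPTYING_HW_QUEUE_DELBA must be completed once the HW queue drains. A hedged sketch of that deferred teardown (helper name and locking context are assumptions):

static void iwlagn_check_delba_ready_sketch(struct iwl_priv *priv,
					    struct ieee80211_vif *vif,
					    struct ieee80211_sta *sta,
					    u16 tid)
{
	int sta_id = iwl_sta_id(sta);
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];

	/* the queue drained; finish what iwlagn_tx_agg_stop deferred */
	if (tid_data->agg.state == IWL_EMPTYING_HW_QUEUE_DELBA &&
	    tid_data->agg.ssn == tid_data->next_reclaimed) {
		tid_data->agg.state = IWL_AGG_OFF;
		iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	}
}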