Example #1
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;
	int ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF!\n");
		return -ENXIO;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
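	/* tid_data is protected by the station lock from here on */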

	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	*ssn = tid_data->agg.ssn;

	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
	if (ret) {
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return ret;
	}

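	/*
	 * If every frame sent before the ADDBA request has already been
	 * reclaimed, the session can start right away; otherwise wait for
	 * the queue to drain (see iwlagn_check_ratid_empty() below).
	 */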
	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return ret;
}
Example #2
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;

	lockdep_assert_held(&mvmsta->lock);

	if ((tid_data->state == IWL_AGG_ON ||
	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
	    iwl_mvm_tid_queued(tid_data) == 0) {
		/*
		 * Now that this aggregation queue is empty tell mac80211 so it
		 * knows we no longer have frames buffered for the station on
		 * this TID (for the TIM bitmap calculation.)
		 */
		ieee80211_sta_set_buffered(sta, tid, false);
	}

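	/* Frames are still in flight for this RA/TID; nothing more to do */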
	if (tid_data->ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		iwl_mvm_disable_txq(mvm, tid_data->txq_id);
		tid_data->state = IWL_AGG_OFF;
		/*
		 * We can't hold the mutex, but since we are after a sequence
		 * point (the call to iwl_mvm_disable_txq()), we don't even
		 * need a memory barrier.
		 */
		mvm->queue_to_mac80211[tid_data->txq_id] =
					IWL_INVALID_MAC80211_QUEUE;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}
Example #3
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
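	/* Sync the HBUS write pointer and the SCD read pointer to index */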
	IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}
Example #4
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
				   struct iwl_tx_queue *txq,
				   int tx_fifo_id, bool active)
{
	int txq_id = txq->q.id;

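	/* One status-bits write (de)activates the queue and binds its FIFO */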
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
				    txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
Example #5
static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta, u8 tid)
{
	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct ieee80211_vif *vif = mvmsta->vif;

	lockdep_assert_held(&mvmsta->lock);

	if (tid_data->ssn != tid_data->next_reclaimed)
		return;

	switch (tid_data->state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue addBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IWL_EMPTYING_HW_QUEUE_DELBA:
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can continue DELBA flow ssn = next_recl = %d\n",
				    tid_data->next_reclaimed);
		iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
		tid_data->state = IWL_AGG_OFF;
		/*
		 * We can't hold the mutex, but since we are after a sequence
		 * point (the call to iwl_trans_txq_disable()), we don't even
		 * need a memory barrier.
		 */
		mvm->queue_to_mac80211[tid_data->txq_id] =
					IWL_INVALID_MAC80211_QUEUE;
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	default:
		break;
	}
}
Example #6
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d not used", txq_id);
		return;
	}

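	/* Tell the scheduler to stop servicing this queue */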
	iwl_txq_set_inactive(trans, txq_id);
	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
Example #7
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->shrd->sta_lock);

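	/* Resolve the station's MAC address and the vif it belongs to */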
	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_ON;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}
Example #8
int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
				      struct iwl_txq *txq,
				      struct iwl_host_cmd *hcmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txq[qid] = txq;
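	/* TFD queue sizes are powers of two, so the mask wraps wr_ptr */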
	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_pcie_gen2_txq_free_memory(trans, txq);
	return ret;
}
Example #9
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

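	/* A single status write sets the active, FIFO and WSL fields */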
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id);
}
Example #10
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
			       int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	/* Stop this Tx queue before configuring it */
	iwl_txq_set_inactive(trans, txq_id);

	/* Set this queue as a chain-building queue unless it is CMD queue */
	if (txq_id != trans_pcie->cmd_queue)
		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

	/* If this queue is mapped to a certain station: it is an AGG queue */
	if (sta_id != IWL_INVALID_STATION) {
		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

		/* Map receiver-address / traffic-ID to this queue */
		iwl_txq_set_ratid_map(trans, ra_tid, txq_id);

		/* enable aggregations for the queue */
		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	} else {
		/*
		 * disable aggregations for the queue, this will also make the
		 * ra_tid mapping configuration irrelevant since it is now a
		 * non-AGG queue.
		 */
		iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);

	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
		       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
		       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
		       SCD_QUEUE_STTS_REG_MSK);
	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
			    txq_id, fifo, ssn & 0xff);
}
Example #11
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
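	/* agg state and sequence numbers are protected by the station lock */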

	tid_data = &priv->tid_data[sta_id][tid];

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 tid_data->agg.state);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	tid_data->agg.state = IWL_AGG_OFF;

	/*
	 * Hand off from sta_lock to the global lock without re-enabling
	 * IRQs in between; the flags saved above are restored when the
	 * second lock is dropped.
	 */
	spin_unlock(&priv->shrd->sta_lock);
	spin_lock(&priv->shrd->lock);

	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
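
Taken together, these examples implement one TX-aggregation (ADDBA/DELBA) state machine per RA/TID: a session starts or stops immediately only when the HW queue has drained (ssn == next_reclaimed); otherwise the TID parks in an IWL_EMPTYING_HW_QUEUE_* state until the reclaim path lets it finish. Below is a minimal, self-contained sketch of that state machine; the names tid_sm, agg_start, agg_stop and agg_check_empty are illustrative stand-ins, not the driver's API, and the mac80211 callbacks are reduced to comments.

#include <stdio.h>

/* States mirrored from the snippets above */
enum agg_state {
	AGG_OFF,
	AGG_ON,
	EMPTYING_HW_QUEUE_ADDBA,
	EMPTYING_HW_QUEUE_DELBA,
};

struct tid_sm {
	enum agg_state state;
	unsigned int ssn;		/* SSN the (would-be) session starts at */
	unsigned int next_reclaimed;	/* next SN the HW will reclaim */
};

/* ADDBA: start at once if the queue is drained, else wait for it */
static void agg_start(struct tid_sm *t)
{
	t->state = (t->ssn == t->next_reclaimed) ?
		AGG_ON :			/* -> start_tx_ba callback */
		EMPTYING_HW_QUEUE_ADDBA;	/* finish in agg_check_empty() */
}

/* DELBA: stop at once if drained, else mark the queue as emptying */
static void agg_stop(struct tid_sm *t)
{
	t->state = (t->ssn == t->next_reclaimed) ?
		AGG_OFF :			/* -> stop_tx_ba callback */
		EMPTYING_HW_QUEUE_DELBA;
}

/* Reclaim path: resolve a pending ADDBA/DELBA once the queue drains */
static void agg_check_empty(struct tid_sm *t)
{
	if (t->ssn != t->next_reclaimed)
		return;
	if (t->state == EMPTYING_HW_QUEUE_ADDBA)
		t->state = AGG_ON;
	else if (t->state == EMPTYING_HW_QUEUE_DELBA)
		t->state = AGG_OFF;
}

int main(void)
{
	struct tid_sm t = { AGG_OFF, 5, 3 };

	agg_start(&t);				/* frames 3..4 still in flight */
	printf("after start: %d\n", t.state);	/* EMPTYING_HW_QUEUE_ADDBA */

	t.next_reclaimed = 5;			/* reclaim catches up */
	agg_check_empty(&t);
	printf("after drain: %d\n", t.state);	/* AGG_ON */
	return 0;
}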