void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
			     int frame_limit)
{
	int tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= MAX_TID_COUNT))
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	tx_fifo = tid_data->agg.tx_fifo;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);
}
/*********************************************************
 *
 * functions called by core.c
 *
 *********************************************************/

int rtl_tx_agg_start(struct ieee80211_hw *hw,
		     const u8 *ra, u16 tid, u16 *ssn)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_tid_data *tid_data;
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));

	RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
		 ("on ra = %pM tid = %d\n", ra, tid));

	if (unlikely(tid >= MAX_TID_COUNT))
		return -EINVAL;

	if (mac->tids[tid].agg.agg_state != RTL_AGG_OFF) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("Start AGG when state is not RTL_AGG_OFF !\n"));
		return -ENXIO;
	}

	tid_data = &mac->tids[tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);

	RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
		 ("HW queue is empty tid:%d\n", tid));
	tid_data->agg.agg_state = RTL_AGG_ON;

	ieee80211_start_tx_ba_cb_irqsafe(mac->vif, ra, tid);

	return 0;
}
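/*
 * Illustrative only: a minimal sketch (not taken from this driver) of how a
 * mac80211 ampdu_action callback in core.c might dispatch TX aggregation
 * requests to helpers such as rtl_tx_agg_start() above.  The callback
 * signature varies across kernel versions; the function name below and the
 * TX_STOP handling are assumptions, only the IEEE80211_AMPDU_* actions and
 * the ieee80211_*_tx_ba_cb_irqsafe() calls come from the mac80211 API.
 */
static int example_op_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid,
				   u16 *ssn, u8 buf_size)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* Record driver state and return; mac80211 sends the ADDBA */
		return rtl_tx_agg_start(hw, sta->addr, tid, ssn);
	case IEEE80211_AMPDU_TX_STOP:
		/* Tear down driver state, then let mac80211 send the DELBA */
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}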
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;
	int ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
	*ssn = tid_data->agg.ssn;

	ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
	if (ret) {
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return ret;
	}

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_ON;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);

	return ret;
}
static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwl5000_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_is_tx_success(status) ?
					IEEE80211_TX_STAT_ACK : 0;
		iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
				   status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n",
				   rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;

			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
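/*
 * Illustrative only: a simplified, self-contained sketch of the mapping the
 * loop above performs.  Frame indices live in a 256-entry TFD ring, and each
 * scheduled frame sets one bit of a 64-bit window anchored at the first index
 * seen.  This helper deliberately omits the in-place re-anchoring the driver
 * does when an earlier index shows up late; it only shows the basic
 * "bit = distance from window start, wrapped at the ring size" idea.
 * All names here are hypothetical.
 */
static u64 example_agg_window_bitmap(const u8 *ring_idx, int nframes, u8 start)
{
	u64 bitmap = 0;
	int i;

	for (i = 0; i < nframes; i++) {
		/* Distance from the window anchor, wrapped at the ring size */
		u8 sh = (ring_idx[i] - start) & 0xff;

		if (sh < 64)	/* only a 64-frame window fits in a u64 */
			bitmap |= 1ULL << sh;
	}
	return bitmap;
}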
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		__skb_queue_head_init(&skbs);
		priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;

		IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
				   next_reclaimed);

		/* we can free until ssn % q.n_bd not inclusive */
		WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
					  ssn, status, &skbs));
		iwlagn_check_ratid_empty(priv, sta_id, tid);
		freed = 0;
		while (!skb_queue_empty(&skbs)) {
			skb = __skb_dequeue(&skbs);
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			kmem_cache_free(priv->tx_cmd_pool,
					(info->driver_data[1]));

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    iwl_is_associated_ctx(ctx) && ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				ctx->last_tx_rejected = true;
				iwl_trans_stop_queue(trans(priv), txq_id,
					"Tx on passive channel");

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp, is_agg);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			ieee80211_tx_status_irqsafe(priv->hw, skb);

			freed++;
		}

		WARN_ON(!is_agg && freed != 1);
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
	return 0;
}
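/*
 * Illustrative only: why the "+ 0x10" above advances the reclaim point by one
 * frame.  The 802.11 sequence-control field carries the fragment number in
 * its low 4 bits and the sequence number in bits 4..15, so adding 0x10 to the
 * raw field increments the sequence number, and SEQ_TO_SN() (mask with
 * IEEE80211_SCTL_SEQ, shift right by 4) extracts it.  This standalone helper
 * is a restatement for illustration, not driver code.
 */
static inline u16 example_next_sn(u16 seq_ctl)
{
	/* IEEE80211_SCTL_SEQ is 0xFFF0: bits 4..15 hold the sequence number */
	return ((seq_ctl + 0x10) & IEEE80211_SCTL_SEQ) >> 4;
}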
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	int sta_id;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);

	tid_data = &priv->tid_data[sta_id][tid];

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv, "Stopping AGG while state not ON "
			 "or starting for %d on %d (%d)\n", sta_id, tid,
			 priv->tid_data[sta_id][tid].agg.state);
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_recl = %d",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		priv->tid_data[sta_id][tid].agg.state =
			IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
			    tid_data->agg.ssn);

turn_off:
	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;

	/* do not restore/save irqs */
	spin_unlock(&priv->shrd->sta_lock);
	spin_lock(&priv->shrd->lock);

	iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);

	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}
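/*
 * Summary (not driver code): the per-TID aggregation states used by
 * iwlagn_tx_agg_start() and iwlagn_tx_agg_stop() above.  The drain-complete
 * transitions are performed by iwlagn_check_ratid_empty() (not shown here),
 * so those arrows are inferred rather than quoted.
 *
 *   IWL_AGG_OFF --ADDBA, ssn == next_reclaimed--> IWL_AGG_ON
 *   IWL_AGG_OFF --ADDBA, queue not drained------> IWL_EMPTYING_HW_QUEUE_ADDBA
 *   IWL_EMPTYING_HW_QUEUE_ADDBA --queue drained--> IWL_AGG_ON
 *   IWL_EMPTYING_HW_QUEUE_ADDBA --early DELBA----> IWL_AGG_OFF
 *   IWL_AGG_ON --DELBA, queue not drained-------> IWL_EMPTYING_HW_QUEUE_DELBA
 *   IWL_AGG_ON / IWL_EMPTYING_HW_QUEUE_DELBA --queue drained--> IWL_AGG_OFF
 */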
/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	unsigned long flags;
	bool is_agg = false;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	spin_lock_irqsave(&priv->shrd->lock, flags);
	if (iwl_is_rfkill(priv->shrd)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		is_agg = true;

	/* irqs already disabled/saved above when locking priv->shrd->lock */
	spin_lock(&priv->shrd->sta_lock);

	dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);

	if (unlikely(!dev_cmd))
		goto drop_unlock_sta;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
	iwl_dbg_log_tx_data_frame(priv, len, hdr);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
		goto drop_unlock_sta;

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
	    !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->shrd->sta_lock);
	spin_unlock_irqrestore(&priv->shrd->lock, flags);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
	spin_unlock(&priv->shrd->sta_lock);
drop_unlock_priv:
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return -1;
}