/*======================= tx =========================================*/ static void rtl_usb_cleanup(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; SET_USB_STOP(rtlusb); /* clean up rx stuff. */ _rtl_usb_cleanup_rx(hw); /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); txinfo = IEEE80211_SKB_CB(_skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, _skb); } usb_kill_anchored_urbs(&rtlusb->tx_pending[i]); } usb_kill_anchored_urbs(&rtlusb->tx_submitted); }
static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct mac80211_hwsim_data *data = hw->priv; int ack; struct ieee80211_tx_info *txi; mac80211_hwsim_monitor_rx(hw, skb); if (skb->len < 10) { /* Should not happen; just a sanity check for addr1 use */ dev_kfree_skb(skb); return NETDEV_TX_OK; } if (!data->radio_enabled) { printk(KERN_DEBUG "%s: dropped TX frame since radio " "disabled\n", wiphy_name(hw->wiphy)); dev_kfree_skb(skb); return NETDEV_TX_OK; } ack = mac80211_hwsim_tx_frame(hw, skb); txi = IEEE80211_SKB_CB(skb); memset(&txi->status, 0, sizeof(txi->status)); if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) { if (ack) txi->flags |= IEEE80211_TX_STAT_ACK; else txi->status.excessive_retries = 1; } ieee80211_tx_status_irqsafe(hw, skb); return NETDEV_TX_OK; }
static void ar5523_data_tx_cb(struct urb *urb) { struct sk_buff *skb = urb->context; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ar5523_tx_data *data = (struct ar5523_tx_data *) txi->driver_data; struct ar5523 *ar = data->ar; unsigned long flags; ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status); spin_lock_irqsave(&ar->tx_data_list_lock, flags); list_del(&data->list); spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); if (urb->status) { ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status); ar5523_data_tx_pkt_put(ar); ieee80211_free_txskb(ar->hw, skb); } else { skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32)); ieee80211_tx_status_irqsafe(ar->hw, skb); } usb_free_urb(urb); }
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status) { struct ieee80211_tx_info *info; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&wcn->dxe_lock, flags); skb = wcn->tx_ack_skb; wcn->tx_ack_skb = NULL; spin_unlock_irqrestore(&wcn->dxe_lock, flags); if (!skb) { wcn36xx_warn("Spurious TX complete indication\n"); return; } info = IEEE80211_SKB_CB(skb); if (status == 1) info->flags |= IEEE80211_TX_STAT_ACK; wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status); ieee80211_tx_status_irqsafe(wcn->hw, skb); ieee80211_wake_queues(wcn->hw); }
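The handler above completes a single deferred frame; its TX-side counterpart is not shown in this collection. A minimal sketch, assuming wcn36xx-like fields (tx_ack_skb, dxe_lock) and assuming queues are stopped while the indication is pending (the ieee80211_wake_queues() in wcn36xx_dxe_tx_ack_ind() suggests a matching stop on the TX side):

#include <net/mac80211.h>

/* Sketch only: park one ack-tracked skb until the firmware TX-ack
 * indication arrives. Returns -EBUSY if a frame is already pending,
 * since only a single outstanding frame is tracked. */
static int my_drv_defer_tx_ack(struct wcn36xx *wcn, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	if (wcn->tx_ack_skb) {
		spin_unlock_irqrestore(&wcn->dxe_lock, flags);
		return -EBUSY;
	}
	wcn->tx_ack_skb = skb;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	/* Paired with the ieee80211_wake_queues() in the ack handler. */
	ieee80211_stop_queues(wcn->hw);
	return 0;
}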
static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) { struct rtl8180_priv *priv = dev->priv; struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; while (skb_queue_len(&ring->queue)) { struct rtl8180_tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb; struct ieee80211_tx_info *info; u32 flags = le32_to_cpu(entry->flags); if (flags & RTL818X_TX_DESC_FLAG_OWN) return; ring->idx = (ring->idx + 1) % ring->entries; skb = __skb_dequeue(&ring->queue); pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf), skb->len, PCI_DMA_TODEVICE); info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && (flags & RTL818X_TX_DESC_FLAG_TX_OK)) info->flags |= IEEE80211_TX_STAT_ACK; info->status.rates[0].count = (flags & 0xFF) + 1; ieee80211_tx_status_irqsafe(dev, skb); if (ring->entries - skb_queue_len(&ring->queue) == 2) ieee80211_wake_queue(dev, prio); } }
static void device_free_tx_buf(struct vnt_private *priv, struct vnt_tx_desc *desc) { struct vnt_td_info *td_info = desc->td_info; struct sk_buff *skb = td_info->skb; if (skb) ieee80211_tx_status_irqsafe(priv->hw, skb); td_info->skb = NULL; td_info->flags = 0; }
void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); if (wcn->tx_ack_skb) { ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb); wcn->tx_ack_skb = NULL; } wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); }
/** * tx_status - reports tx status of a packet if required * @hw - a &struct ieee80211_hw pointer * @skb - an sk_buff * @status - the tx status of the packet without control information * @success - True for successful transmission of the frame * * This function calls ieee80211_tx_status_irqsafe() if required by the * control information. It copies the control information into the status * information. * * If no status information has been requested, the skb is freed. */ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_status *status, bool success) { struct zd_tx_skb_control_block *cb = (struct zd_tx_skb_control_block *) skb->cb; ZD_ASSERT(cb->control != NULL); memcpy(&status->control, cb->control, sizeof(status->control)); if (!success) status->excessive_retries = 1; clear_tx_skb_control_block(skb); ieee80211_tx_status_irqsafe(hw, skb, status); }
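The zd1211rw handler above targets the legacy three-argument ieee80211_tx_status_irqsafe(); most other snippets here use the current two-argument form, where the status travels in the skb control block. A minimal sketch of that report path, with hypothetical driver names:

#include <net/mac80211.h>

/* Sketch only: report a completed frame to mac80211 from IRQ context;
 * "acked" would come from the device's completion descriptor. */
static void my_drv_complete_tx(struct ieee80211_hw *hw,
			       struct sk_buff *skb, bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* Clear TX-time control fields while keeping what mac80211
	 * expects to survive into the status report. */
	ieee80211_tx_info_clear_status(info);

	if (acked && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(hw, skb); /* consumes the skb */
}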
void wcn36xx_dxe_deinit(struct wcn36xx *wcn) { free_irq(wcn->tx_irq, wcn); free_irq(wcn->rx_irq, wcn); /* Flush any pending rx work */ flush_workqueue(wcn->wq); if (wcn->tx_ack_skb) { ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb); wcn->tx_ack_skb = NULL; } wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch); wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch); }
static void adm8211_interrupt_tci(struct ieee80211_hw *dev) { struct adm8211_priv *priv = dev->priv; unsigned int dirty_tx; spin_lock(&priv->lock); for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) { unsigned int entry = dirty_tx % priv->tx_ring_size; u32 status = le32_to_cpu(priv->tx_ring[entry].status); struct ieee80211_tx_info *txi; struct adm8211_tx_ring_info *info; struct sk_buff *skb; if (status & TDES0_CONTROL_OWN || !(status & TDES0_CONTROL_DONE)) break; info = &priv->tx_buffers[entry]; skb = info->skb; txi = IEEE80211_SKB_CB(skb); /* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */ pci_unmap_single(priv->pdev, info->mapping, info->skb->len, PCI_DMA_TODEVICE); ieee80211_tx_info_clear_status(txi); skb_pull(skb, sizeof(struct adm8211_tx_hdr)); memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && !(status & TDES0_STATUS_ES)) txi->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(dev, skb); info->skb = NULL; } if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2) ieee80211_wake_queue(dev, 0); priv->dirty_tx = dirty_tx; spin_unlock(&priv->lock); }
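adm8211_interrupt_tci() wakes queue 0 once ring occupancy drops below size - 2. The occupancy arithmetic in isolation, as a sketch with hypothetical parameters (the free-running producer/consumer counters make the subtraction wrap-safe):

/* Sketch only: wake a mac80211 queue once enough TX descriptors are
 * free again. cur_tx and dirty_tx are free-running counters, so their
 * difference is the current ring occupancy even across wraparound. */
static void my_drv_maybe_wake(struct ieee80211_hw *hw,
			      unsigned int cur_tx, unsigned int dirty_tx,
			      unsigned int ring_size)
{
	unsigned int in_flight = cur_tx - dirty_tx;

	/* Keep a two-entry headroom so the TX path cannot race the
	 * ring completely full right after the queue is woken. */
	if (in_flight < ring_size - 2)
		ieee80211_wake_queue(hw, 0);
}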
static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc) { PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo; struct sk_buff *skb = pTDInfo->skb; /* pre-allocated buf_dma can't be unmapped. */ if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) { dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma, skb->len, DMA_TO_DEVICE); } if (skb) ieee80211_tx_status_irqsafe(pDevice->hw, skb); pTDInfo->skb_dma = 0; pTDInfo->skb = NULL; pTDInfo->byFlags = 0; }
static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc) { PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo; struct sk_buff *skb = pTDInfo->skb; // pre-allocated buf_dma can't be unmapped. if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) { pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma, skb->len, PCI_DMA_TODEVICE); } if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) ieee80211_tx_status_irqsafe(pDevice->hw, skb); else dev_kfree_skb_irq(skb); pTDInfo->skb_dma = 0; pTDInfo->skb = NULL; pTDInfo->byFlags = 0; }
/** * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd * * When FW advances 'R' idx, all entries between old and new 'R' idx * need to be reclaimed. As a result, some free space forms. If there is * enough free space (> low mark), wake the stack that feeds us. */ static void il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx) { struct il_tx_queue *txq = &il->txq[txq_id]; struct il_queue *q = &txq->q; struct sk_buff *skb; BUG_ON(txq_id == IL39_CMD_QUEUE_NUM); for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) { skb = txq->skbs[txq->q.read_ptr]; ieee80211_tx_status_irqsafe(il->hw, skb); txq->skbs[txq->q.read_ptr] = NULL; il->ops->txq_free_tfd(il, txq); } if (il_queue_space(q) > q->low_mark && txq_id >= 0 && txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered) il_wake_queue(il, txq); }
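The kernel-doc above describes the generic pattern: chase the firmware's read index around a circular buffer, completing each skb along the way. Condensed into a standalone sketch with hypothetical ring types:

/* Sketch only: reclaim completed TX entries up to (not including) the
 * index the firmware has advanced to, reporting each skb to mac80211. */
struct my_txq {
	struct sk_buff *skbs[256];
	int read_ptr;	/* oldest un-reclaimed entry */
	int n_bd;	/* ring size; 256 to match the array above */
};

static void my_txq_reclaim(struct ieee80211_hw *hw, struct my_txq *q,
			   int fw_idx)
{
	while (q->read_ptr != fw_idx) {
		ieee80211_tx_status_irqsafe(hw, q->skbs[q->read_ptr]);
		q->skbs[q->read_ptr] = NULL;
		q->read_ptr = (q->read_ptr + 1) % q->n_bd;
	}
}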
static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; rtlusb->usb_tx_post_hdl(hw, urb, skb); skb_pull(skb, RTL_TX_HEADER_SIZE); txinfo = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; if (urb->status) { RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, "Urb has error status 0x%X\n", urb->status); goto out; } /* TODO: statistics */ out: ieee80211_tx_status_irqsafe(hw, skb); return urb->status; }
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; while (skb_queue_len(&ring->queue)) { struct rtl_tx_desc *entry = &ring->desc[ring->idx]; struct sk_buff *skb; struct ieee80211_tx_info *info; u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true, HW_DESC_OWN); /* beacon packets only use the first descriptor by default, and the OWN bit may not be cleared by the hardware */ if (own) return; ring->idx = (ring->idx + 1) % ring->entries; skb = __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc((u8 *) entry, true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, ("new ring->idx:%d, " "free: skb_queue_len:%d, free: seq:%x\n", ring->idx, skb_queue_len(&ring->queue), *(u16 *) (skb->data + 22))); info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); info->flags |= IEEE80211_TX_STAT_ACK; /*info->status.rates[0].count = 1; */ ieee80211_tx_status_irqsafe(hw, skb); if ((ring->entries - skb_queue_len(&ring->queue)) == 2) { RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, ("more desc left, wake " "skb_queue@%d, ring->idx = %d, " "skb_queue_len = %d\n", prio, ring->idx, skb_queue_len(&ring->queue))); ieee80211_wake_queue(hw, skb_get_queue_mapping(skb)); } skb = NULL; } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) { rtl_lps_leave(hw); } }
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence); struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; struct ieee80211_hdr *hdr; u32 status = le16_to_cpu(tx_resp->status.status); u32 ssn = iwlagn_get_scd_ssn(tx_resp); int tid; int sta_id; int freed; struct ieee80211_tx_info *info; unsigned long flags; struct sk_buff_head skbs; struct sk_buff *skb; struct iwl_rxon_context *ctx; bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> IWLAGN_TX_RES_TID_POS; sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS; spin_lock_irqsave(&priv->shrd->sta_lock, flags); if (is_agg) iwl_rx_reply_tx_agg(priv, tx_resp); if (tx_resp->frame_count == 1) { IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn); __skb_queue_head_init(&skbs); /*we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, ssn, status, &skbs); freed = 0; while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_data_qos(hdr->frame_control)) priv->last_seq_ctl = tx_resp->seq_ctl; info = IEEE80211_SKB_CB(skb); ctx = info->driver_data[0]; kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1])); memset(&info->status, 0, sizeof(info->status)); if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && iwl_is_associated_ctx(ctx) && ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { ctx->last_tx_rejected = true; iwl_trans_stop_queue(trans(priv), txq_id, "Tx on passive channel"); IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " "rate_n_flags 0x%x retries %d\n", txq_id, iwl_get_tx_fail_reason(status), status, le32_to_cpu(tx_resp->rate_n_flags), tx_resp->failure_frame); IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, idx=%d\n", tx_resp->frame_count, cmd_index); } /* check if BAR is needed */ if (is_agg && !iwl_is_tx_success(status)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb), tx_resp, is_agg); if (!is_agg) iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); ieee80211_tx_status_irqsafe(priv->hw, skb); freed++; } WARN_ON(!is_agg && freed != 1); } iwl_check_abort_status(priv, tx_resp->frame_count, status); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; }
static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .noise = -98, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = (struct ieee80211_hdr *)(skb->data); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } rtl_is_special_data(hw, skb, false); if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } } } static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 *rxdesc = skb->data; struct ieee80211_hdr *hdr; bool unicast = false; __le16 fc; struct ieee80211_rx_status rx_status = {0}; struct rtl_stats stats = { .signal = 0, .noise = -98, .rate = 0, }; skb_pull(skb, RTL_RX_DESC_SIZE); rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); hdr = (struct ieee80211_hdr *)(skb->data); fc = hdr->frame_control; if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); if (is_broadcast_ether_addr(hdr->addr1)) { /*TODO*/; } else if (is_multicast_ether_addr(hdr->addr1)) { /*TODO*/ } else { unicast = true; rtlpriv->stats.rxbytesunicast += skb->len; } rtl_is_special_data(hw, skb, false); if (ieee80211_is_data(fc)) { rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); if (unicast) rtlpriv->link_info.num_rx_inperiod++; } if (likely(rtl_action_proc(hw, skb, false))) { struct sk_buff *uskb = NULL; u8 *pdata; uskb = dev_alloc_skb(skb->len + 128); /* on allocation failure the packet is simply dropped */ if (uskb) { memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status)); pdata = (u8 *)skb_put(uskb, skb->len); memcpy(pdata, skb->data, skb->len); ieee80211_rx_irqsafe(hw, uskb); } dev_kfree_skb_any(skb); } else { dev_kfree_skb_any(skb); } } } static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) { struct sk_buff *_skb; struct sk_buff_head rx_queue; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); skb_queue_head_init(&rx_queue); if (rtlusb->usb_rx_segregate_hdl) rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue); WARN_ON(skb_queue_empty(&rx_queue)); while (!skb_queue_empty(&rx_queue)) { _skb = skb_dequeue(&rx_queue); _rtl_usb_rx_process_agg(hw, _skb); ieee80211_rx_irqsafe(hw, _skb); } } static void _rtl_rx_completed(struct urb *_urb) { struct sk_buff *skb = (struct sk_buff *)_urb->context; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0]; struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); struct rtl_priv *rtlpriv = rtl_priv(hw); int err = 0; if (unlikely(IS_USB_STOP(rtlusb))) goto free; if (likely(0 == _urb->status)) { /* If this code were moved to a work queue, would CPU * utilization be improved? NOTE: We shall allocate another skb * and reuse the original one. 
*/ skb_put(skb, _urb->actual_length); if (likely(!rtlusb->usb_rx_segregate_hdl)) { struct sk_buff *_skb; _rtl_usb_rx_process_noagg(hw, skb); _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC); if (IS_ERR(_skb)) { err = PTR_ERR(_skb); RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, "Can't allocate skb for bulk IN!\n"); return; } skb = _skb; } else { /* TODO */ _rtl_rx_pre_process(hw, skb); pr_err("rx agg not supported\n"); } goto resubmit; } switch (_urb->status) { /* disconnect */ case -ENOENT: case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: goto free; default: break; } resubmit: skb_reset_tail_pointer(skb); skb_trim(skb, 0); usb_anchor_urb(_urb, &rtlusb->rx_submitted); err = usb_submit_urb(_urb, GFP_ATOMIC); if (unlikely(err)) { usb_unanchor_urb(_urb); goto free; } return; free: dev_kfree_skb_irq(skb); } static int _rtl_usb_receive(struct ieee80211_hw *hw) { struct urb *urb; struct sk_buff *skb; int err; int i; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); WARN_ON(0 == rtlusb->rx_urb_num); /* 1600 == 1514 + max WLAN header + rtk info */ WARN_ON(rtlusb->rx_max_size < 1600); for (i = 0; i < rtlusb->rx_urb_num; i++) { err = -ENOMEM; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, "Failed to alloc URB!!\n"); goto err_out; } skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); if (IS_ERR(skb)) { RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, "Failed to prep_rx_urb!!\n"); err = PTR_ERR(skb); goto err_out; } usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) goto err_out; usb_free_urb(urb); } return 0; err_out: usb_kill_anchored_urbs(&rtlusb->rx_submitted); return err; } static int rtl_usb_start(struct ieee80211_hw *hw) { int err; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); err = rtlpriv->cfg->ops->hw_init(hw); if (!err) { rtl_init_rx_config(hw); /* Enable software */ SET_USB_START(rtlusb); /* should be called after adapter start and interrupt enable. */ set_hal_start(rtlhal); /* Start bulk IN */ _rtl_usb_receive(hw); } return err; } /*======================= tx =========================================*/ static void rtl_usb_cleanup(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; SET_USB_STOP(rtlusb); /* clean up rx stuff. */ usb_kill_anchored_urbs(&rtlusb->rx_submitted); /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); txinfo = IEEE80211_SKB_CB(_skb); ieee80211_tx_info_clear_status(txinfo); txinfo->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, _skb); } usb_kill_anchored_urbs(&rtlusb->tx_pending[i]); } usb_kill_anchored_urbs(&rtlusb->tx_submitted); }
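_rtl_rx_completed() above follows the standard anchored-URB RX loop: hand the filled skb up, attach a fresh buffer, and resubmit. That completion path reduced to its control flow, as a sketch with hypothetical names (the anchor is assumed to be initialized elsewhere with init_usb_anchor()):

#include <linux/usb.h>
#include <linux/skbuff.h>

static struct usb_anchor my_rx_anchor; /* init_usb_anchor() at probe */

/* Sketch only: canonical shape of a bulk-IN completion handler. */
static void my_rx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;

	if (urb->status) {
		/* -ENOENT/-ECONNRESET/-ESHUTDOWN: device going away */
		dev_kfree_skb_irq(skb);
		return;
	}

	skb_put(skb, urb->actual_length);
	/* ... hand the payload to mac80211, e.g. via a copy ... */

	/* Reset the skb and resubmit it as the next RX buffer. */
	skb_trim(skb, 0);
	usb_anchor_urb(urb, &my_rx_anchor);
	if (usb_submit_urb(urb, GFP_ATOMIC)) {
		usb_unanchor_urb(urb);
		dev_kfree_skb_irq(skb);
	}
}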
/** * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA * * Handles the block-acknowledge notification from the device, which reports * success of frames sent via aggregation. */ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; struct iwl_ht_agg *agg; struct sk_buff_head reclaimed_skbs; struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr; struct sk_buff *skb; unsigned long flags; int sta_id; int tid; int freed; /* "flow" corresponds to Tx queue */ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); if (scd_flow >= hw_params(priv).max_txq_num) { IWL_ERR(priv, "BUG_ON scd_flow is bigger than number of queues\n"); return 0; } sta_id = ba_resp->sta_id; tid = ba_resp->tid; agg = &priv->shrd->tid_data[sta_id][tid].agg; spin_lock_irqsave(&priv->shrd->sta_lock, flags); if (unlikely(agg->txq_id != scd_flow)) { /* * FIXME: this is a uCode bug which needs to be addressed; * log the information and return for now. Since it can happen * very often, and in order not to fill the syslog, the logging * is not enabled by default. */ IWL_DEBUG_TX_REPLY(priv, "BA scd_flow %d does not match txq_id %d\n", scd_flow, agg->txq_id); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } if (unlikely(!agg->wait_for_ba)) { if (unlikely(ba_resp->bitmap)) IWL_ERR(priv, "Received BA when not expected\n"); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; } IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n", agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32, ba_resp->sta_id); IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " "scd_flow = %d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl, (unsigned long long)le64_to_cpu(ba_resp->bitmap), scd_flow, ba_resp_scd_ssn); /* Mark that the expected block-ack response arrived */ agg->wait_for_ba = false; /* Sanity check values reported by uCode */ if (ba_resp->txed_2_done > ba_resp->txed) { IWL_DEBUG_TX_REPLY(priv, "bogus sent(%d) and ack(%d) count\n", ba_resp->txed, ba_resp->txed_2_done); /* * set txed = txed_2_done, * so it won't impact rate scale */ ba_resp->txed = ba_resp->txed_2_done; } IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n", ba_resp->txed, ba_resp->txed_2_done); __skb_queue_head_init(&reclaimed_skbs); /* Release all TFDs before the SSN, i.e. all TFDs in front of * block-ack window (we assume that they've been successfully * transmitted ... if not, it's too late anyway). 
*/ iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn, 0, &reclaimed_skbs); freed = 0; while (!skb_queue_empty(&reclaimed_skbs)) { skb = __skb_dequeue(&reclaimed_skbs); hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(1); info = IEEE80211_SKB_CB(skb); kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1])); if (freed == 1) { /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ info = IEEE80211_SKB_CB(skb); memset(&info->status, 0, sizeof(info->status)); info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = ba_resp->txed_2_done; info->status.ampdu_len = ba_resp->txed; iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); } ieee80211_tx_status_irqsafe(priv->hw, skb); } spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; }
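The BA handler trusts the uCode's txed/txed_2_done counters; conceptually, the acked count is just the population count of the BA bitmap over the transmit window. A hedged sketch of that accounting, independent of the iwlagn structures:

#include <linux/bitops.h>

/* Sketch only: count how many frames of an A-MPDU a compressed
 * block-ack acknowledges. Bit i of the bitmap corresponds to the
 * frame sent i positions after the window start (ssn). */
static int my_ba_acked_count(u64 bitmap, int frames_sent)
{
	/* Ignore bits beyond the frames actually transmitted. */
	if (frames_sent < 64)
		bitmap &= (1ULL << frames_sent) - 1;

	return hweight64(bitmap);
}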
void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb); u8 rate_idx, rate_flags, retry_rates; u8 skbdesc_flags = skbdesc->flags; unsigned int i; bool success; /* * Unmap the skb. */ rt2x00queue_unmap_skb(rt2x00dev, entry->skb); /* * Remove the L2 padding which was added during TX. */ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) rt2x00queue_remove_l2pad(entry->skb, header_length); /* * If the IV/EIV data was stripped from the frame before it was * passed to the hardware, we should now reinsert it again because * mac80211 will expect the same data to be present in the * frame as it was passed to us. */ if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) rt2x00crypto_tx_insert_iv(entry->skb, header_length); /* * Send frame to debugfs immediately; after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); /* * Determine if the frame has been successfully transmitted. */ success = test_bit(TXDONE_SUCCESS, &txdesc->flags) || test_bit(TXDONE_UNKNOWN, &txdesc->flags) || test_bit(TXDONE_FALLBACK, &txdesc->flags); /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; rt2x00dev->link.qual.tx_failed += !success; rate_idx = skbdesc->tx_rate_idx; rate_flags = skbdesc->tx_rate_flags; retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ? (txdesc->retry + 1) : 1; /* * Initialize TX status */ memset(&tx_info->status, 0, sizeof(tx_info->status)); tx_info->status.ack_signal = 0; /* * The frame was sent with retries; the hardware tried different * rates to send out the frame, lowering the rate one step at each * retry, so fill the rate chain accordingly. */ for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { tx_info->status.rates[i].idx = rate_idx - i; tx_info->status.rates[i].flags = rate_flags; tx_info->status.rates[i].count = 1; } if (i < (IEEE80211_TX_MAX_RATES - 1)) tx_info->status.rates[i].idx = -1; /* terminate */ if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (success) tx_info->flags |= IEEE80211_TX_STAT_ACK; else rt2x00dev->low_level_stats.dot11ACKFailureCount++; } if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { if (success) rt2x00dev->low_level_stats.dot11RTSSuccessCount++; else rt2x00dev->low_level_stats.dot11RTSFailureCount++; } /* * Only send the status report to mac80211 when it's a frame * that originated in mac80211. If this was an extra frame coming * through a mac80211 library call (RTS/CTS) then we should not * send the status report back. */ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); else dev_kfree_skb_irq(entry->skb); /* * Make this entry available for reuse. */ entry->skb = NULL; entry->flags = 0; rt2x00dev->ops->lib->clear_entry(entry); clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); /* * If the data queue was below the threshold before the txdone * handler, we must make sure the packet queue in the mac80211 stack * is re-enabled when the txdone handler has finished. */ if (!rt2x00queue_threshold(entry->queue)) ieee80211_wake_queue(rt2x00dev->hw, qid); }
void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); /* * Unmap the skb. */ rt2x00queue_unmap_skb(rt2x00dev, entry->skb); /* * Send frame to debugfs immediately; after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb); /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += test_bit(TXDONE_SUCCESS, &txdesc->flags); rt2x00dev->link.qual.tx_failed += test_bit(TXDONE_FAILURE, &txdesc->flags); /* * Initialize TX status */ memset(&tx_info->status, 0, sizeof(tx_info->status)); tx_info->status.ack_signal = 0; tx_info->status.excessive_retries = test_bit(TXDONE_EXCESSIVE_RETRY, &txdesc->flags); tx_info->status.retry_count = txdesc->retry; if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (test_bit(TXDONE_SUCCESS, &txdesc->flags)) tx_info->flags |= IEEE80211_TX_STAT_ACK; else if (test_bit(TXDONE_FAILURE, &txdesc->flags)) rt2x00dev->low_level_stats.dot11ACKFailureCount++; } if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { if (test_bit(TXDONE_SUCCESS, &txdesc->flags)) rt2x00dev->low_level_stats.dot11RTSSuccessCount++; else if (test_bit(TXDONE_FAILURE, &txdesc->flags)) rt2x00dev->low_level_stats.dot11RTSFailureCount++; } /* * Only send the status report to mac80211 when TX status was * requested by it. If this was an extra frame coming through * a mac80211 library call (RTS/CTS) then we should not send the * status report back. */ if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); else dev_kfree_skb_irq(entry->skb); /* * Make this entry available for reuse. */ entry->skb = NULL; entry->flags = 0; rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry); __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); /* * If the data queue was below the threshold before the txdone * handler, we must make sure the packet queue in the mac80211 stack * is re-enabled when the txdone handler has finished. */ if (!rt2x00queue_threshold(entry->queue)) ieee80211_wake_queue(rt2x00dev->hw, qid); }
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb, struct iwl_device_cmd *cmd) { struct iwl_rx_packet *pkt = rxb_addr(rxb); u16 sequence = le16_to_cpu(pkt->hdr.sequence); int txq_id = SEQ_TO_QUEUE(sequence); int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence); struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; struct ieee80211_hdr *hdr; u32 status = le16_to_cpu(tx_resp->status.status); u16 ssn = iwlagn_get_scd_ssn(tx_resp); int tid; int sta_id; int freed; struct ieee80211_tx_info *info; unsigned long flags; struct sk_buff_head skbs; struct sk_buff *skb; struct iwl_rxon_context *ctx; bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >> IWLAGN_TX_RES_TID_POS; sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >> IWLAGN_TX_RES_RA_POS; spin_lock_irqsave(&priv->shrd->sta_lock, flags); if (is_agg) iwl_rx_reply_tx_agg(priv, tx_resp); if (tx_resp->frame_count == 1) { u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); if (is_agg) { /* If this is an aggregation queue, we can rely on the * ssn since the wifi sequence number corresponds to * the index in the TFD ring (%256). * The seq_ctl is the sequence control of the packet * to which this Tx response relates. But if there is a * hole in the bitmap of the BA we received, this Tx * response may allow to reclaim the hole and all the * subsequent packets that were already acked. * In that case, seq_ctl != ssn, and the next packet * to be reclaimed will be ssn and not seq_ctl. */ next_reclaimed = ssn; } __skb_queue_head_init(&skbs); priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed; IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", next_reclaimed); /*we can free until ssn % q.n_bd not inclusive */ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, ssn, status, &skbs)); iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; while (!skb_queue_empty(&skbs)) { skb = __skb_dequeue(&skbs); hdr = (struct ieee80211_hdr *)skb->data; if (!ieee80211_is_data_qos(hdr->frame_control)) priv->last_seq_ctl = tx_resp->seq_ctl; info = IEEE80211_SKB_CB(skb); ctx = info->driver_data[0]; kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1])); memset(&info->status, 0, sizeof(info->status)); if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && iwl_is_associated_ctx(ctx) && ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { ctx->last_tx_rejected = true; iwl_trans_stop_queue(trans(priv), txq_id, "Tx on passive channel"); IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " "rate_n_flags 0x%x retries %d\n", txq_id, iwl_get_tx_fail_reason(status), status, le32_to_cpu(tx_resp->rate_n_flags), tx_resp->failure_frame); IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, idx=%d\n", tx_resp->frame_count, cmd_index); } /* check if BAR is needed */ if (is_agg && !iwl_is_tx_success(status)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb), tx_resp, is_agg); if (!is_agg) iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); ieee80211_tx_status_irqsafe(priv->hw, skb); freed++; } WARN_ON(!is_agg && freed != 1); } iwl_check_abort_status(priv, tx_resp->frame_count, status); spin_unlock_irqrestore(&priv->shrd->sta_lock, flags); return 0; }