static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) { struct sk_buff *txdesc; int msdu_id; /* No locks needed. Called after communication with the device has * been stopped. */ for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { if (!test_bit(msdu_id, htt->used_msdu_ids)) continue; txdesc = htt->pending_tx[msdu_id]; if (!txdesc) continue; ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id); if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) ATH10K_SKB_CB(txdesc)->htt.refcount = 1; ATH10K_SKB_CB(txdesc)->htt.discard = true; ath10k_txrx_tx_unref(htt, txdesc); } }
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; int ret; len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); skb = ath10k_htc_alloc_skb(len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; ATH10K_SKB_CB(skb)->htt.is_conf = true; ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; }
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, struct sk_buff *skb) { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); skb_pull(skb, sizeof(struct ath10k_htc_hdr)); }
static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); if (!is_eth && ieee80211_is_mgmt(hdr->frame_control)) return HTT_DATA_TX_EXT_TID_MGMT; else if (cb->flags & ATH10K_SKB_F_QOS) return skb->priority % IEEE80211_QOS_CTL_TID_MASK; else return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; }
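/*
 * Illustrative sketch, not driver code: how a priority value maps to a TID
 * the way the expression above does. The constant values below are local
 * assumptions for the demo only; the driver takes them from the mac80211
 * and HTT headers. Note that "priority % 0x0f" is not the same as
 * "priority & 0x0f": the modulo form maps priority 15 back to TID 0.
 */
#include <stdio.h>

#define DEMO_QOS_CTL_TID_MASK   0x0f /* assumed IEEE80211_QOS_CTL_TID_MASK */
#define DEMO_EXT_TID_MGMT       17   /* assumed HTT_DATA_TX_EXT_TID_MGMT */
#define DEMO_EXT_TID_NON_QOS    16   /* assumed HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST */

static unsigned int demo_get_tid(int is_mgmt, int is_qos, unsigned int priority)
{
	if (is_mgmt)
		return DEMO_EXT_TID_MGMT;
	if (is_qos)
		return priority % DEMO_QOS_CTL_TID_MASK;
	return DEMO_EXT_TID_NON_QOS;
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio <= 15; prio++)
		printf("priority %2u -> tid %u\n", prio, demo_get_tid(0, 1, prio));
	return 0;
}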
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); struct ath10k_vif *arvif = (void *)cb->vif->drv_priv; if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) return ar->scan.vdev_id; else if (cb->vif) return arvif->vdev_id; else if (ar->monitor_started) return ar->monitor_vdev_id; else return 0; }
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_htc *htc = &ar->htc; struct ath10k_skb_cb *skb_cb; struct ath10k_htc_ep *ep; if (WARN_ON_ONCE(!skb)) return; skb_cb = ATH10K_SKB_CB(skb); ep = &htc->endpoint[skb_cb->eid]; ath10k_htc_notify_tx_completion(ep, skb); /* the skb now belongs to the completion handler */ }
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) { struct htt_rx_desc *rx_desc; struct sk_buff *skb; dma_addr_t paddr; int ret = 0, idx; idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr)); while (num > 0) { skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); if (!skb) { ret = -ENOMEM; goto fail; } if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN)) skb_pull(skb, PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) - skb->data); /* Clear rx_desc attention word before posting to Rx ring */ rx_desc = (struct htt_rx_desc *)skb->data; rx_desc->attention.flags = __cpu_to_le32(0); paddr = dma_map_single(htt->ar->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) { dev_kfree_skb_any(skb); ret = -ENOMEM; goto fail; } ATH10K_SKB_CB(skb)->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); htt->rx_ring.fill_cnt++; num--; idx++; idx &= htt->rx_ring.size_mask; } fail: *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx); return ret; }
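/*
 * Minimal standalone sketch, not driver code, of the ring-index arithmetic
 * used in the fill loop above: with a power-of-two ring size,
 * "idx = (idx + 1) & size_mask" wraps the producer index without a divide.
 * The ring size below is an assumption for the demo only.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int ring_size = 8;           /* must be a power of two */
	const unsigned int size_mask = ring_size - 1;
	unsigned int idx = 5;
	int i;

	for (i = 0; i < 6; i++) {
		printf("fill slot %u\n", idx);
		idx = (idx + 1) & size_mask;        /* wraps 7 -> 0 */
	}
	return 0;
}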
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt) { struct sk_buff *skb; int i; for (i = 0; i < htt->rx_ring.size; i++) { skb = htt->rx_ring.netbufs_ring[i]; if (!skb) continue; dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); htt->rx_ring.netbufs_ring[i] = NULL; } }
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) { struct sk_buff *skb; struct ath10k_skb_cb *cb; int i; for (i = 0; i < htt->rx_ring.fill_cnt; i++) { skb = htt->rx_ring.netbufs_ring[i]; cb = ATH10K_SKB_CB(skb); dma_unmap_single(htt->ar->dev, cb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } htt->rx_ring.fill_cnt = 0; }
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) { struct sk_buff *skb; struct ath10k_skb_cb *skb_cb; skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); if (!skb) return NULL; skb_reserve(skb, 20); /* FIXME: why 20 bytes? */ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb); return skb; }
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ath10k_htt *htt = &ar->htt; if (skb_cb->htt.is_conf) { dev_kfree_skb_any(skb); return; } if (skb_cb->is_aborted) { skb_cb->htt.discard = true; /* if the skbuff is aborted we need to make sure we'll free up * the tx resources, we can't simply run tx_unref() 2 times * because if htt tx completion came in earlier we'd access * unallocated memory */ if (skb_cb->htt.refcount > 1) skb_cb->htt.refcount = 1; } ath10k_txrx_tx_unref(htt, skb); }
void athp_freebuf(struct ath10k *ar, struct athp_buf_ring *br, struct athp_buf *bf) { struct ath10k_skb_cb *cb = ATH10K_SKB_CB(bf); /* Complain if the buffer has a noderef left */ if (cb->ni != NULL) { ath10k_err(ar, "%s: TODO: pbuf=%p, mbuf=%p, ni is not null (%p) !\n", __func__, bf, bf->m, cb->ni); } ATHP_BUF_LOCK(ar); if (br->btype != bf->btype) { ath10k_err(ar, "%s: ERROR: bf=%p, bf btype=%d, ring btype=%d\n", __func__, bf, bf->btype, br->btype); } ath10k_dbg(ar, ATH10K_DBG_PBUF, "%s: br=%d, m=%p, bf=%p, paddr=0x%lx\n", __func__, br->btype, bf->m, bf, bf->mb.paddr); /* if there's an mbuf - unmap (if needed) and free it */ if (bf->m != NULL) _athp_free_buf(ar, br, bf); /* Push it into the inactive queue */ TAILQ_INSERT_TAIL(&br->br_inactive, bf, next); ATHP_BUF_UNLOCK(ar); }
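/*
 * Standalone sketch, not the driver's types, of the "push the freed buffer
 * onto the inactive tail queue" pattern used above, built on <sys/queue.h>.
 * The struct and variable names here are made up for the demo.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct demo_buf {
	int id;
	TAILQ_ENTRY(demo_buf) next;
};

TAILQ_HEAD(demo_buf_list, demo_buf);

int main(void)
{
	struct demo_buf_list inactive;
	struct demo_buf *bf;
	int i;

	TAILQ_INIT(&inactive);

	/* "free" three buffers by appending them to the inactive queue */
	for (i = 0; i < 3; i++) {
		bf = calloc(1, sizeof(*bf));
		if (!bf)
			return 1;
		bf->id = i;
		TAILQ_INSERT_TAIL(&inactive, bf, next);
	}

	/* later, drain the inactive queue */
	while ((bf = TAILQ_FIRST(&inactive)) != NULL) {
		printf("inactive buf %d\n", bf->id);
		TAILQ_REMOVE(&inactive, bf, next);
		free(bf);
	}
	return 0;
}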
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct device *dev = htt->ar->dev; struct ath10k_skb_cb *skb_cb; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; int len = 0; int msdu_id = -1; int res; res = ath10k_htt_tx_inc_pending(htt); if (res) return res; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); txdesc = ath10k_htc_alloc_skb(len); if (!txdesc) { res = -ENOMEM; goto err; } spin_lock_bh(&htt->tx_lock); msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); if (msdu_id < 0) { spin_unlock_bh(&htt->tx_lock); res = msdu_id; goto err; } htt->pending_tx[msdu_id] = txdesc; spin_unlock_bh(&htt->tx_lock); res = ath10k_skb_map(dev, msdu); if (res) goto err; skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); /* refcount is decremented by HTC and HTT completions until it reaches * zero and is freed */ skb_cb = ATH10K_SKB_CB(txdesc); skb_cb->htt.msdu_id = msdu_id; skb_cb->htt.refcount = 2; skb_cb->htt.msdu = msdu; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err; return 0; err: ath10k_skb_unmap(dev, msdu); if (txdesc) dev_kfree_skb_any(txdesc); if (msdu_id >= 0) { spin_lock_bh(&htt->tx_lock); htt->pending_tx[msdu_id] = NULL; ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); } ath10k_htt_tx_dec_pending(htt); return res; }
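/*
 * Minimal standalone sketch, not driver code, of the "refcount == 2" tx
 * descriptor lifetime described in the comment above: one reference is
 * dropped by the HTC send completion and one by the HTT tx completion, and
 * the descriptor is only freed when the count reaches zero. The clamp used
 * by the forced-cleanup path (so a single unref frees it) is shown as well.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_txdesc {
	int refcount;
};

static void demo_tx_unref(struct demo_txdesc *desc, const char *who)
{
	if (--desc->refcount > 0) {
		printf("%s completion: %d reference(s) left\n", who, desc->refcount);
		return;
	}
	printf("%s completion: freeing descriptor\n", who);
	free(desc);
}

int main(void)
{
	struct demo_txdesc *desc = malloc(sizeof(*desc));

	if (!desc)
		return 1;
	desc->refcount = 2;         /* HTC completion + HTT completion */
	demo_tx_unref(desc, "HTC"); /* 2 -> 1 */
	demo_tx_unref(desc, "HTT"); /* 1 -> 0, freed */

	/* forced cleanup (device stopped): clamp so one unref frees it */
	desc = malloc(sizeof(*desc));
	if (!desc)
		return 1;
	desc->refcount = 2;
	if (desc->refcount > 1)
		desc->refcount = 1;
	demo_tx_unref(desc, "cleanup");
	return 0;
}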
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct device *dev = htt->ar->dev; struct htt_cmd *cmd; struct htt_data_tx_desc_frag *tx_frags; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb; struct sk_buff *txdesc = NULL; struct sk_buff *txfrag = NULL; u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id; u8 tid; int prefetch_len, desc_len, frag_len; dma_addr_t frags_paddr; int msdu_id = -1; int res; u8 flags0; u16 flags1; res = ath10k_htt_tx_inc_pending(htt); if (res) return res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; frag_len = sizeof(*tx_frags) * 2; txdesc = ath10k_htc_alloc_skb(desc_len); if (!txdesc) { res = -ENOMEM; goto err; } txfrag = dev_alloc_skb(frag_len); if (!txfrag) { res = -ENOMEM; goto err; } if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { ath10k_warn("htt alignment check failed. dropping packet.\n"); res = -EIO; goto err; } spin_lock_bh(&htt->tx_lock); msdu_id = ath10k_htt_tx_alloc_msdu_id(htt); if (msdu_id < 0) { spin_unlock_bh(&htt->tx_lock); res = msdu_id; goto err; } htt->pending_tx[msdu_id] = txdesc; spin_unlock_bh(&htt->tx_lock); res = ath10k_skb_map(dev, msdu); if (res) goto err; /* tx fragment list must be terminated with zero-entry */ skb_put(txfrag, frag_len); tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data; tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); tx_frags[0].len = __cpu_to_le32(msdu->len); tx_frags[1].paddr = __cpu_to_le32(0); tx_frags[1].len = __cpu_to_le32(0); res = ath10k_skb_map(dev, txfrag); if (res) goto err; ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n", (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr, (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ", txfrag->data, frag_len); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", msdu->data, msdu->len); skb_put(txdesc, desc_len); cmd = (struct htt_cmd *)txdesc->data; memset(cmd, 0, desc_len); tid = ATH10K_SKB_CB(msdu)->htt.tid; ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); flags0 = 0; if (!ieee80211_has_protected(hdr->frame_control)) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags1 = 0; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); frags_paddr = ATH10K_SKB_CB(txfrag)->paddr; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; cmd->data_tx.flags0 = flags0; cmd->data_tx.flags1 = __cpu_to_le16(flags1); cmd->data_tx.len = __cpu_to_le16(msdu->len); cmd->data_tx.id = __cpu_to_le16(msdu_id); cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr); cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len); /* refcount is decremented by HTC and HTT completions until it reaches * zero and is freed */ skb_cb = ATH10K_SKB_CB(txdesc); skb_cb->htt.msdu_id = msdu_id; skb_cb->htt.refcount = 2; skb_cb->htt.txfrag = txfrag; skb_cb->htt.msdu = msdu; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err; return 0; err: if (txfrag) ath10k_skb_unmap(dev, txfrag); if (txdesc) dev_kfree_skb_any(txdesc); if (txfrag) dev_kfree_skb_any(txfrag); if (msdu_id >= 0) { spin_lock_bh(&htt->tx_lock); htt->pending_tx[msdu_id] = NULL; ath10k_htt_tx_free_msdu_id(htt, msdu_id); 
spin_unlock_bh(&htt->tx_lock); } ath10k_htt_tx_dec_pending(htt); ath10k_skb_unmap(dev, msdu); return res; }
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = skb_cb->vdev_id; int len = 0; int msdu_id = -1; int res; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; bool limit_mgmt_desc = false; bool is_probe_resp = false; if (ar->hw_params.max_probe_resp_desc_thres) { limit_mgmt_desc = true; if (ieee80211_is_probe_resp(hdr->frame_control)) is_probe_resp = true; } res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp); if (res) goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) { goto err_tx_dec; } msdu_id = res; txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_txdesc; } skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; memset(cmd, 0, len); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); skb_cb->htt.txbuf = NULL; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc); err: return res; }
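/*
 * Sketch with toy resources, not the driver's, of the goto-unwind idiom the
 * tx paths above rely on: each acquisition gets a matching error label, and
 * a failure jumps to the label that releases everything acquired so far, in
 * reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int demo_send(int fail_send)
{
	char *id, *desc;
	int ret;

	id = malloc(16);            /* ~ msdu id allocation */
	if (!id)
		return -1;

	desc = malloc(64);          /* ~ tx descriptor allocation */
	if (!desc) {
		ret = -1;
		goto err_free_id;
	}

	if (fail_send) {            /* ~ dma map / htc send failing */
		ret = -1;
		goto err_free_desc;
	}

	free(desc);                 /* success path of the demo */
	free(id);
	return 0;

err_free_desc:
	free(desc);
err_free_id:
	free(id);
	return ret;
}

int main(void)
{
	printf("demo_send(fail) = %d\n", demo_send(1));
	printf("demo_send(ok)   = %d\n", demo_send(0));
	return 0;
}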
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; int len; int ret; /* * the HW expects the buffer to be an integral number of 4-byte * "words" */ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; ring = &cmd->rx_setup.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; cmd->rx_setup.hdr.num_rings = 1; /* FIXME: do we need all of this? */ flags = 0; flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; flags |= HTT_RX_RING_FLAGS_PPDU_START; flags |= HTT_RX_RING_FLAGS_PPDU_END; flags |= HTT_RX_RING_FLAGS_MPDU_START; flags |= HTT_RX_RING_FLAGS_MPDU_END; flags |= HTT_RX_RING_FLAGS_MSDU_START; flags |= HTT_RX_RING_FLAGS_MSDU_END; flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; flags |= HTT_RX_RING_FLAGS_FRAG_INFO; flags |= HTT_RX_RING_FLAGS_UNICAST_RX; flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; flags |= HTT_RX_RING_FLAGS_CTRL_RX; flags |= HTT_RX_RING_FLAGS_MGMT_RX; flags |= HTT_RX_RING_FLAGS_NULL_RX; flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ring->fw_idx_shadow_reg_paddr = __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); #define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); #undef desc_offset ATH10K_SKB_CB(skb)->htt.is_conf = true; ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; }
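/*
 * Standalone sketch with a toy struct, not htt_rx_desc, of the desc_offset()
 * trick above: the firmware wants field offsets expressed in 4-byte words,
 * so the byte offset from offsetof() is divided by 4.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_rx_desc {
	uint32_t attention;
	uint32_t frag_info;
	uint32_t mpdu_start[2];
	uint32_t msdu_start[3];
};

#define demo_desc_offset(x) (offsetof(struct demo_rx_desc, x) / 4)

int main(void)
{
	printf("attention  at word %zu\n", demo_desc_offset(attention));
	printf("frag_info  at word %zu\n", demo_desc_offset(frag_info));
	printf("mpdu_start at word %zu\n", demo_desc_offset(mpdu_start));
	printf("msdu_start at word %zu\n", demo_desc_offset(msdu_start));
	return 0;
}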
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct athp_buf *msdu) { #if 0 struct ath10k *ar = htt->ar; // struct device *dev = ar->sc_dev; struct athp_buf *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = skb_cb->vdev_id; int len = 0; int msdu_id = -1; int res; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); ATHP_HTT_TX_LOCK(htt); /* XXX note: we're specifically trying to store athp_buf's in the idr */ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); ATHP_HTT_TX_UNLOCK(htt); if (res < 0) { goto err_tx_dec; } msdu_id = res; txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } /* * load/sync happens here for the msdu contents. * Then, the command that's allocated below will get * load/sync in the HTC layer. */ /* XXX TODO: ADRIAN: figure out what I'm missing! */ res = athp_dma_mbuf_load(ar, &ar->buf_tx.dh, &msdu->mb, msdu->m); if (res) { res = -EIO; goto err_free_txdesc; } /* Ok, we're not modifying the msdu further, so sync here */ athp_dma_mbuf_pre_xmit(ar, &ar->buf_tx.dh, &msdu->mb); mbuf_skb_put(txdesc->m, len); cmd = (struct htt_cmd *)mbuf_skb_data(txdesc->m); memset(cmd, 0, len); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(msdu->mb.paddr); cmd->mgmt_tx.len = __cpu_to_le32(mbuf_skb_len(msdu->m)); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, mbuf_skb_data(msdu->m), min_t(int, mbuf_skb_len(msdu->m), HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); skb_cb->htt.txbuf = NULL; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: athp_dma_mbuf_unload(ar, &ar->buf_tx.dh, &msdu->mb); err_free_txdesc: athp_freebuf(ar, &ar->buf_tx, txdesc); err_free_msdu_id: ATHP_HTT_TX_LOCK(htt); ath10k_htt_tx_free_msdu_id(htt, msdu_id); ATHP_HTT_TX_UNLOCK(htt); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; #else device_printf(htt->ar->sc_dev, "%s; TODO implement!\n", __func__); return (-EINVAL); #endif }
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, u8 **fw_desc, int *fw_desc_len, struct sk_buff **head_msdu, struct sk_buff **tail_msdu, u32 *attention) { int msdu_len, msdu_chaining = 0; struct sk_buff *msdu; struct htt_rx_desc *rx_desc; lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_confused) { ath10k_warn("htt is confused. refusing rx\n"); return -1; } msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt); while (msdu) { int last_msdu, msdu_len_invalid, msdu_chained; dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(msdu)->paddr, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", msdu->data, msdu->len + skb_tailroom(msdu)); rx_desc = (struct htt_rx_desc *)msdu->data; /* FIXME: we must report msdu payload since this is what caller * expects now */ skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload)); skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)); /* * Sanity check - confirm the HW is finished filling in the * rx data. * If the HW and SW are working correctly, then it's guaranteed * that the HW's MAC DMA is done before this point in the SW. * To prevent the case that we handle a stale Rx descriptor, * just assert for now until we have a way to recover. */ if (!(__le32_to_cpu(rx_desc->attention.flags) & RX_ATTENTION_FLAGS_MSDU_DONE)) { ath10k_htt_rx_free_msdu_chain(*head_msdu); *head_msdu = NULL; msdu = NULL; ath10k_err("htt rx stopped. cannot recover\n"); htt->rx_confused = true; break; } *attention |= __le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_TKIP_MIC_ERR | RX_ATTENTION_FLAGS_DECRYPT_ERR | RX_ATTENTION_FLAGS_FCS_ERR | RX_ATTENTION_FLAGS_MGMT_TYPE); /* * Copy the FW rx descriptor for this MSDU from the rx * indication message into the MSDU's netbuf. HL uses the * same rx indication message definition as LL, and simply * appends new info (fields from the HW rx desc, and the * MSDU payload itself). So, the offset into the rx * indication message only has to account for the standard * offset of the per-MSDU FW rx desc info within the * message, and how many bytes of the per-MSDU FW rx desc * info have already been consumed. (And the endianness of * the host, since for a big-endian host, the rx ind * message contents, including the per-MSDU rx desc bytes, * were byteswapped during upload.) */ if (*fw_desc_len > 0) { rx_desc->fw_desc.info0 = **fw_desc; /* * The target is expected to only provide the basic * per-MSDU rx descriptors. Just to be sure, verify * that the target has not attached extension data * (e.g. LRO flow ID). */ /* or more, if there's extension data */ (*fw_desc)++; (*fw_desc_len)--; } else { /* * When an oversized AMSDU happened, FW will lost * some of MSDU status - in this case, the FW * descriptors provided will be less than the * actual MSDUs inside this MPDU. Mark the FW * descriptors so that it will still deliver to * upper stack, if no CRC error for this MPDU. * * FIX THIS - the FW descriptors are actually for * MSDUs in the end of this A-MSDU instead of the * beginning. 
*/ rx_desc->fw_desc.info0 = 0; } msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags) & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR | RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR)); msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0), RX_MSDU_START_INFO0_MSDU_LENGTH); msdu_chained = rx_desc->frag_info.ring2_more_count; if (msdu_len_invalid) msdu_len = 0; skb_trim(msdu, 0); skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE)); msdu_len -= msdu->len; /* FIXME: Do chained buffers include htt_rx_desc or not? */ while (msdu_chained--) { struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(next)->paddr, next->len + skb_tailroom(next), DMA_FROM_DEVICE); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx chained: ", next->data, next->len + skb_tailroom(next)); skb_trim(next, 0); skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE)); msdu_len -= next->len; msdu->next = next; msdu = next; msdu_chaining = 1; } last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & RX_MSDU_END_INFO0_LAST_MSDU; if (last_msdu) { msdu->next = NULL; break; } else { struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt); msdu->next = next; msdu = next; } } *tail_msdu = msdu; if (*head_msdu == NULL) msdu_chaining = -1; /* * Don't refill the ring yet. * * First, the elements popped here are still in use - it is not * safe to overwrite them until the matching call to * mpdu_desc_list_next. Second, for efficiency it is preferable to * refill the rx ring with 1 PPDU's worth of rx buffers (something * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers * (something like 3 buffers). Consequently, we'll rely on the txrx * SW to tell us when it is done pulling all the PPDU's rx buffers * out of the rx ring, and then refill it just once. */ return msdu_chaining; }
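/*
 * Arithmetic sketch, not driver code, of the length bookkeeping in the pop
 * loop above: the head buffer holds at most the descriptor-sized payload
 * area and each chained buffer holds up to a full rx buffer. In the driver
 * the chain count comes from the hardware descriptor (ring2_more_count);
 * this only shows what the skb_put()/msdu_len arithmetic consumes. Both
 * buffer sizes below are assumptions for the demo, not the driver's values.
 */
#include <stdio.h>

#define DEMO_RX_MSDU_SIZE 1560   /* assumed head-buffer payload capacity */
#define DEMO_RX_BUF_SIZE  1920   /* assumed chained-buffer capacity */

static int demo_chained_buffers(int msdu_len)
{
	int chained = 0;

	msdu_len -= (msdu_len < DEMO_RX_MSDU_SIZE) ? msdu_len : DEMO_RX_MSDU_SIZE;
	while (msdu_len > 0) {
		chained++;
		msdu_len -= (msdu_len < DEMO_RX_BUF_SIZE) ? msdu_len : DEMO_RX_BUF_SIZE;
	}
	return chained;
}

int main(void)
{
	int lens[] = { 300, 1560, 4000, 8000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("msdu_len %4d -> %d chained buffer(s)\n",
		       lens[i], demo_chained_buffers(lens[i]));
	return 0;
}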
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); int len = 0; int msdu_id = -1; int res; int skb_len; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; msdu_id = res; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_len = msdu->len; skb_cb->paddr = dma_map_single(dev, msdu->data, skb_len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_txdesc; } skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; memset(cmd, 0, len); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(skb_len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, skb_len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; #ifdef CONFIG_ATH10K_DEBUGFS ar->debug.tx_bytes += skb_len; #endif return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err: return res; }
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct ath10k_htt_txbuf *txbuf; struct htt_data_tx_desc_frag *frags; bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET); u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu); u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth); int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; u16 freq = 0; int skb_len; u32 frags_paddr = 0; u32 txbuf_paddr; struct htt_msdu_ext_desc *ext_desc = NULL; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); spin_unlock_bh(&htt->tx_lock); if (res < 0) goto err; msdu_id = res; prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); txbuf = &htt->txbuf.vaddr[msdu_id]; txbuf_paddr = htt->txbuf.paddr + (sizeof(struct ath10k_htt_txbuf) * msdu_id); if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) && txmode == ATH10K_HW_TXRX_RAW && ieee80211_has_protected(hdr->frame_control)) { skb_put(msdu, IEEE80211_CCMP_MIC_LEN); } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) { res = -EIO; goto err_free_msdu_id; } if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) freq = ar->scan.roc_freq; switch (txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; /* pass through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { memset(&htt->frag_desc.vaddr[msdu_id], 0, sizeof(struct htt_msdu_ext_desc)); frags = (struct htt_data_tx_desc_frag *) &htt->frag_desc.vaddr[msdu_id].frags; ext_desc = &htt->frag_desc.vaddr[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(skb_cb->paddr); frags[0].tword_addr.paddr_hi = 0; frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { frags = txbuf->frags; frags[0].dword_addr.paddr = __cpu_to_le32(skb_cb->paddr); frags[0].dword_addr.len = __cpu_to_le32(msdu->len); frags[1].dword_addr.paddr = 0; frags[1].dword_addr.len = 0; frags_paddr = txbuf_paddr; } flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; frags_paddr = skb_cb->paddr; break; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. 
By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ txbuf->htc_hdr.eid = htt->eid; txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx) + prefetch_len); txbuf->htc_hdr.flags = 0; if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; if (ar->hw_params.continuous_frag_desc) ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; txbuf->cmd_tx.flags0 = flags0; txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); if (ath10k_mac_tx_frm_has_freq(ar)) { txbuf->cmd_tx.offchan_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); txbuf->cmd_tx.offchan_tx.freq = __cpu_to_le16(freq); } else { txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); } skb_len = msdu->len; trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, skb_len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid, freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, skb_len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &txbuf->htc_hdr; sg_items[0].paddr = txbuf_paddr + sizeof(txbuf->frags); sg_items[0].len = sizeof(txbuf->htc_hdr) + sizeof(txbuf->cmd_hdr) + sizeof(txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; #ifdef CONFIG_ATH10K_DEBUGFS ar->debug.tx_bytes += skb_len; #endif return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: ath10k_htt_tx_free_msdu_id(htt, msdu_id); err: return res; }
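/*
 * Layout sketch with a toy struct, not ath10k_htt_txbuf, of why the first
 * sg item above starts at "txbuf_paddr + sizeof(frags)": the fragment list
 * sits at the front of the per-MSDU tx buffer, and the HTC header plus the
 * two HTT command headers follow it contiguously, so one sg entry covers
 * all three. Field sizes here are made up for the demo.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_frag { uint32_t paddr; uint32_t len; };

struct demo_txbuf {
	struct demo_frag frags[2];  /* fragment list, first in the buffer */
	uint8_t htc_hdr[8];         /* HTC header */
	uint8_t cmd_hdr[4];         /* HTT command header */
	uint8_t cmd_tx[16];         /* HTT data-tx descriptor */
};

int main(void)
{
	printf("sg[0] offset = %zu (== sizeof frags %zu)\n",
	       offsetof(struct demo_txbuf, htc_hdr),
	       sizeof(((struct demo_txbuf *)0)->frags));
	printf("sg[0] length = %zu (htc_hdr + cmd_hdr + cmd_tx)\n",
	       sizeof(((struct demo_txbuf *)0)->htc_hdr) +
	       sizeof(((struct demo_txbuf *)0)->cmd_hdr) +
	       sizeof(((struct demo_txbuf *)0)->cmd_tx));
	return 0;
}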
int ath10k_htt_tx(struct ath10k_htt *htt, struct athp_buf *msdu) { #if 0 struct ath10k *ar = htt->ar; //struct device *dev = ar->sc_dev; //struct ieee80211_frame *hdr = (struct ieee80211_frame *)mbuf_skb_data(msdu->m); struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct htt_data_tx_desc_frag *frags; u8 vdev_id = skb_cb->vdev_id; u8 tid = skb_cb->htt.tid; int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; dma_addr_t paddr = 0; u32 frags_paddr = 0; struct htt_msdu_ext_desc *ext_desc = NULL; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; ATHP_HTT_TX_LOCK(htt); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); ATHP_HTT_TX_UNLOCK(htt); if (res < 0) { goto err_tx_dec; } msdu_id = res; prefetch_len = min(htt->prefetch_len, mbuf_skb_len(msdu->m)); prefetch_len = roundup(prefetch_len, 4); skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, &paddr); if (!skb_cb->htt.txbuf) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->htt.txbuf_paddr = paddr; if ((IEEE80211_IS_ACTION(hdr) || IEEE80211_IS_DEAUTH(hdr) || IEEE80211_IS_DISASSOC(hdr)) && IEEE80211_HAS_PROT(hdr)) { mbuf_skb_put(msdu->m, IEEE80211_CCMP_MIC_LEN); } else if (!skb_cb->htt.nohwcrypt && skb_cb->txmode == ATH10K_HW_TXRX_RAW) { mbuf_skb_put(msdu->m, IEEE80211_CCMP_MIC_LEN); } /* Do the initial load/sync */ /* XXX TODO: ADRIAN: figure out what I'm missing! */ res = athp_dma_mbuf_load(ar, &ar->buf_tx.dh, &msdu->mb, msdu->m); if (res) { res = -EIO; goto err_free_txbuf; } /* Ok, we're not modifying the msdu further, so sync here */ athp_dma_mbuf_pre_xmit(ar, &ar->buf_tx.dh, &msdu->mb); switch (skb_cb->txmode) { case ATH10K_HW_TXRX_RAW: case ATH10K_HW_TXRX_NATIVE_WIFI: flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; /* pass through */ case ATH10K_HW_TXRX_ETHERNET: if (ar->hw_params.continuous_frag_desc) { memset(&htt->frag_desc.vaddr[msdu_id], 0, sizeof(struct htt_msdu_ext_desc)); frags = (struct htt_data_tx_desc_frag *) &htt->frag_desc.vaddr[msdu_id].frags; ext_desc = &htt->frag_desc.vaddr[msdu_id]; frags[0].tword_addr.paddr_lo = __cpu_to_le32(msdu->mb.paddr); frags[0].tword_addr.paddr_hi = 0; frags[0].tword_addr.len_16 = __cpu_to_le16(mbuf_skb_len(msdu->m)); frags_paddr = htt->frag_desc.paddr + (sizeof(struct htt_msdu_ext_desc) * msdu_id); } else { frags = skb_cb->htt.txbuf->frags; frags[0].dword_addr.paddr = __cpu_to_le32(msdu->mb.paddr); frags[0].dword_addr.len = __cpu_to_le32(mbuf_skb_len(msdu->m)); frags[1].dword_addr.paddr = 0; frags[1].dword_addr.len = 0; frags_paddr = skb_cb->htt.txbuf_paddr; } flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); break; case ATH10K_HW_TXRX_MGMT: flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; frags_paddr = msdu->mb.paddr; break; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. 
By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx) + prefetch_len); skb_cb->htt.txbuf->htc_hdr.flags = 0; if (skb_cb->htt.nohwcrypt) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; if (!skb_cb->is_protected) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); /* XXX TODO: ADRIAN: L3/L4 offload */ #if 0 if (msdu->ip_summed == CHECKSUM_PARTIAL && !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; if (ar->hw_params.continuous_frag_desc) ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; } #endif /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(mbuf_skb_len(msdu->m)); skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); #ifdef ATHP_TRACE_DIAG trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); #endif ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %u flags1 %u len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", (unsigned) flags0, (unsigned) flags1, mbuf_skb_len(msdu->m), msdu_id, frags_paddr, (u32)msdu->mb.paddr, vdev_id, tid, skb_cb->htt.freq); athp_debug_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", mbuf_skb_data(msdu->m), mbuf_skb_len(msdu->m)); #ifdef ATHP_TRACE_DIAG trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); #endif sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; sg_items[0].paddr = skb_cb->htt.txbuf_paddr + sizeof(skb_cb->htt.txbuf->frags); sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = mbuf_skb_data(msdu->m); sg_items[1].paddr = msdu->mb.paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: athp_dma_mbuf_unload(ar, &ar->buf_tx.dh, &msdu->mb); err_free_txbuf: dma_pool_free(htt->tx_pool, skb_cb->htt.txbuf, skb_cb->htt.txbuf_paddr); err_free_msdu_id: ATHP_HTT_TX_LOCK(htt); ath10k_htt_tx_free_msdu_id(htt, msdu_id); ATHP_HTT_TX_UNLOCK(htt); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; #else device_printf(htt->ar->sc_dev, "%s; TODO implement!\n", __func__); return (-EINVAL); #endif }
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *skb) { struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ath10k_hif_sg_item sg_item; struct device *dev = htc->ar->dev; int credits = 0; int ret; if (htc->ar->state == ATH10K_STATE_WEDGED) return -ECOMM; if (eid >= ATH10K_HTC_EP_COUNT) { ath10k_warn(ar, "Invalid endpoint id: %d\n", eid); return -ENOENT; } skb_push(skb, sizeof(struct ath10k_htc_hdr)); if (ep->tx_credit_flow_enabled) { credits = DIV_ROUND_UP(skb->len, htc->target_credit_size); spin_lock_bh(&htc->tx_lock); if (ep->tx_credits < credits) { spin_unlock_bh(&htc->tx_lock); ret = -EAGAIN; goto err_pull; } ep->tx_credits -= credits; ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d consumed %d credits (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); } ath10k_htc_prepare_tx_skb(ep, skb); skb_cb->eid = eid; skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); ret = dma_mapping_error(dev, skb_cb->paddr); if (ret) { ret = -EIO; goto err_credits; } sg_item.transfer_id = ep->eid; sg_item.transfer_context = skb; sg_item.vaddr = skb->data; sg_item.paddr = skb_cb->paddr; sg_item.len = skb->len; ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1); if (ret) goto err_unmap; return 0; err_unmap: dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: if (ep->tx_credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d reverted %d credits back (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); if (ep->ep_ops.ep_tx_credits) ep->ep_ops.ep_tx_credits(htc->ar); } err_pull: skb_pull(skb, sizeof(struct ath10k_htc_hdr)); return ret; }
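/*
 * Plain C sketch, not kernel code, of the credit accounting used above: a
 * frame consumes ceil(len / credit_size) credits, which is what
 * DIV_ROUND_UP() computes, and the same amount is refunded if the send
 * fails. The per-credit buffer size below is an assumption for the demo.
 */
#include <stdio.h>

#define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int target_credit_size = 1792;  /* assumed per-credit buffer size */
	int tx_credits = 4;
	int skb_len = 2200;
	int credits = DEMO_DIV_ROUND_UP(skb_len, target_credit_size);

	printf("frame of %d bytes needs %d credit(s)\n", skb_len, credits);
	if (tx_credits < credits) {
		printf("not enough credits, would return -EAGAIN\n");
		return 0;
	}
	tx_credits -= credits;
	printf("consumed, %d credit(s) left\n", tx_credits);

	/* on a failed send the credits are given back */
	tx_credits += credits;
	printf("reverted, %d credit(s) left\n", tx_credits);
	return 0;
}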
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = skb_cb->vdev_id; int len = 0; int msdu_id = -1; int res; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt); if (res < 0) { spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); txdesc = ath10k_htc_alloc_skb(ar, len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) goto err_free_txdesc; skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); skb_cb->htt.txbuf = NULL; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); htt->pending_tx[msdu_id] = NULL; ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; }
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct htt_data_tx_desc_frag *frags; u8 vdev_id = skb_cb->vdev_id; u8 tid = skb_cb->htt.tid; int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; dma_addr_t paddr; u32 frags_paddr; bool use_frags; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt); if (res < 0) { spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); /* Since HTT 3.0 there is no separate mgmt tx command. However in case * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx * fragment list host driver specifies directly frame pointer. */ use_frags = htt->target_version_major < 3 || !ieee80211_is_mgmt(hdr->frame_control); skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, &paddr); if (!skb_cb->htt.txbuf) goto err_free_msdu_id; skb_cb->htt.txbuf_paddr = paddr; skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) goto err_free_txbuf; if (likely(use_frags)) { frags = skb_cb->htt.txbuf->frags; frags[0].paddr = __cpu_to_le32(skb_cb->paddr); frags[0].len = __cpu_to_le32(msdu->len); frags[1].paddr = 0; frags[1].len = 0; flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); frags_paddr = skb_cb->htt.txbuf_paddr; } else { flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); frags_paddr = skb_cb->paddr; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received. That's why HTC tx completion handler itself is ignored by * setting NULL to transfer_context for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx) + prefetch_len); skb_cb->htt.txbuf->htc_hdr.flags = 0; if (!ieee80211_has_protected(hdr->frame_control)) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely a regular tx completion with discard status. 
*/ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; sg_items[0].paddr = skb_cb->htt.txbuf_paddr + sizeof(skb_cb->htt.txbuf->frags); sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txbuf: dma_pool_free(htt->tx_pool, skb_cb->htt.txbuf, skb_cb->htt.txbuf_paddr); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); htt->pending_tx[msdu_id] = NULL; ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; }
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct device *dev = htt->ar->dev; struct htt_cmd *cmd; struct htt_data_tx_desc_frag *tx_frags; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct sk_buff *txdesc = NULL; bool use_frags; u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id; u8 tid; int prefetch_len, desc_len; int msdu_id = -1; int res; u8 flags0; u16 flags1; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt); if (res < 0) { spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len; txdesc = ath10k_htc_alloc_skb(desc_len); if (!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } /* Since HTT 3.0 there is no separate mgmt tx command. However in case * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx * fragment list host driver specifies directly frame pointer. */ use_frags = htt->target_version_major < 3 || !ieee80211_is_mgmt(hdr->frame_control); if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) { ath10k_warn("htt alignment check failed. dropping packet.\n"); res = -EIO; goto err_free_txdesc; } if (use_frags) { skb_cb->htt.frag_len = sizeof(*tx_frags) * 2; skb_cb->htt.pad_len = (unsigned long)msdu->data - round_down((unsigned long)msdu->data, 4); skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); } else { skb_cb->htt.frag_len = 0; skb_cb->htt.pad_len = 0; } res = ath10k_skb_map(dev, msdu); if (res) goto err_pull_txfrag; if (use_frags) { dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); /* tx fragment list must be terminated with zero-entry */ tx_frags = (struct htt_data_tx_desc_frag *)msdu->data; tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr + skb_cb->htt.frag_len + skb_cb->htt.pad_len); tx_frags[0].len = __cpu_to_le32(msdu->len - skb_cb->htt.frag_len - skb_cb->htt.pad_len); tx_frags[1].paddr = __cpu_to_le32(0); tx_frags[1].len = __cpu_to_le32(0); dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); } ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n", (unsigned long long) ATH10K_SKB_CB(msdu)->paddr); ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ", msdu->data, msdu->len); skb_put(txdesc, desc_len); cmd = (struct htt_cmd *)txdesc->data; tid = ATH10K_SKB_CB(msdu)->htt.tid; ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid); flags0 = 0; if (!ieee80211_has_protected(hdr->frame_control)) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; if (use_frags) flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); else flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); flags1 = 0; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; cmd->data_tx.flags0 = flags0; cmd->data_tx.flags1 = __cpu_to_le16(flags1); cmd->data_tx.len = __cpu_to_le16(msdu->len - skb_cb->htt.frag_len - skb_cb->htt.pad_len); cmd->data_tx.id = __cpu_to_le16(msdu_id); cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr); cmd->data_tx.peerid = 
__cpu_to_le32(HTT_INVALID_PEERID); memcpy(cmd->data_tx.prefetch, hdr, prefetch_len); res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: ath10k_skb_unmap(dev, msdu); err_pull_txfrag: skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); htt->pending_tx[msdu_id] = NULL; ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; }
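/*
 * Small sketch, not driver code, of the alignment arithmetic used above:
 * pad_len is the distance from the buffer start back to the previous
 * 4-byte boundary, i.e. addr - round_down(addr, 4), which equals addr & 3.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int demo_pad_len(uintptr_t addr)
{
	uintptr_t rounded_down = addr & ~(uintptr_t)3; /* round_down(addr, 4) */

	return (unsigned int)(addr - rounded_down);    /* same as addr & 3 */
}

int main(void)
{
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x1002, 0x1003, 0x1004 };
	unsigned int i;

	for (i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("addr 0x%lx -> pad_len %u\n",
		       (unsigned long)addrs[i], demo_pad_len(addrs[i]));
	return 0;
}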