int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct sk_buff *txfrag = NULL;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        u8 tid;
        int prefetch_len, desc_len, frag_len;
        dma_addr_t frags_paddr;
        int msdu_id = -1;
        int res;
        u8 flags0;
        u16 flags1;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
        frag_len = sizeof(*tx_frags) * 2;

        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        txfrag = dev_alloc_skb(frag_len);
        if (!txfrag) {
                res = -ENOMEM;
                goto err;
        }

        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        /* tx fragment list must be terminated with zero-entry */
        skb_put(txfrag, frag_len);
        tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
        tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        tx_frags[0].len = __cpu_to_le32(msdu->len);
        tx_frags[1].paddr = __cpu_to_le32(0);
        tx_frags[1].len = __cpu_to_le32(0);

        res = ath10k_skb_map(dev, txfrag);
        if (res)
                goto err;

        ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
                   (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
                   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
                        txfrag->data, frag_len);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);

        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, desc_len);

        tid = ATH10K_SKB_CB(msdu)->htt.tid;

        ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

        flags0 = 0;
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
        flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

        flags1 = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

        frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0 = flags0;
        cmd->data_tx.flags1 = __cpu_to_le16(flags1);
        cmd->data_tx.len = __cpu_to_le16(msdu->len);
        cmd->data_tx.id = __cpu_to_le16(msdu_id);
        cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);

        /* refcount is decremented by HTC and HTT completions until it reaches
         * zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.txfrag = txfrag;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        if (txfrag)
                ath10k_skb_unmap(dev, txfrag);
        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (txfrag)
                dev_kfree_skb_any(txfrag);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        ath10k_skb_unmap(dev, msdu);
        return res;
}
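/*
 * For reference: a minimal sketch of the msdu_id allocator referenced
 * above. It assumes struct ath10k_htt carries a used_msdu_ids bitmap
 * sized to a max_num_pending_tx limit; those field names are
 * assumptions for illustration, not shown in this excerpt. Both
 * helpers expect to run under htt->tx_lock, matching the locking the
 * callers above take around them.
 */
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
        int msdu_id;

        lockdep_assert_held(&htt->tx_lock);

        /* a free bit doubles as an index into htt->pending_tx[] */
        msdu_id = find_first_zero_bit(htt->used_msdu_ids,
                                      htt->max_num_pending_tx);
        if (msdu_id == htt->max_num_pending_tx)
                return -ENOBUFS;

        __set_bit(msdu_id, htt->used_msdu_ids);
        return msdu_id;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
        lockdep_assert_held(&htt->tx_lock);

        __clear_bit(msdu_id, htt->used_msdu_ids);
}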
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        txdesc = ath10k_htc_alloc_skb(len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        /* refcount is decremented by HTC and HTT completions until it reaches
         * zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        ath10k_skb_unmap(dev, msdu);

        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        return res;
}
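/*
 * Both tx entry points above throttle on a pending-frame counter before
 * allocating anything. A plausible sketch of those helpers, assuming a
 * num_pending_tx counter and max_num_pending_tx limit on struct
 * ath10k_htt (assumed names): once the limit is hit the transmit fails
 * with -EBUSY and the caller backs off.
 */
static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
        int ret = 0;

        spin_lock_bh(&htt->tx_lock);
        if (htt->num_pending_tx >= htt->max_num_pending_tx)
                ret = -EBUSY;       /* target can accept no more frames */
        else
                htt->num_pending_tx++;
        spin_unlock_bh(&htt->tx_lock);

        return ret;
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
        spin_lock_bh(&htt->tx_lock);
        htt->num_pending_tx--;
        spin_unlock_bh(&htt->tx_lock);
}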
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
                                             struct sk_buff *skb)
{
        ath10k_skb_unmap(htc->ar->dev, skb);
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
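/*
 * The ath10k_skb_map()/ath10k_skb_unmap() calls used throughout are
 * thin streaming-DMA wrappers. A minimal sketch, assuming the mapped
 * bus address is cached in a paddr field of the skb control block --
 * consistent with the tx functions above reading it back via
 * ATH10K_SKB_CB(skb)->paddr:
 */
static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

        skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, skb_cb->paddr)))
                return -EIO;

        return 0;
}

static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

        /* tolerate unmap of a never-mapped skb, as some error paths
         * above reach here before ath10k_skb_map() ran */
        if (!skb_cb->paddr)
                return -EIO;

        dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
        return 0;
}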
int ath10k_htc_send(struct ath10k_htc *htc,
                    enum ath10k_htc_ep_id eid,
                    struct sk_buff *skb)
{
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
        int credits = 0;
        int ret;

        if (htc->ar->state == ATH10K_STATE_WEDGED)
                return -ECOMM;

        if (eid >= ATH10K_HTC_EP_COUNT) {
                ath10k_warn("Invalid endpoint id: %d\n", eid);
                return -ENOENT;
        }

        /* FIXME: This looks ugly, can we fix it? */
        spin_lock_bh(&htc->tx_lock);
        if (htc->stopped) {
                spin_unlock_bh(&htc->tx_lock);
                return -ESHUTDOWN;
        }
        spin_unlock_bh(&htc->tx_lock);

        skb_push(skb, sizeof(struct ath10k_htc_hdr));

        if (ep->tx_credit_flow_enabled) {
                credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
                spin_lock_bh(&htc->tx_lock);
                if (ep->tx_credits < credits) {
                        spin_unlock_bh(&htc->tx_lock);
                        ret = -EAGAIN;
                        goto err_pull;
                }
                ep->tx_credits -= credits;
                spin_unlock_bh(&htc->tx_lock);
        }

        ath10k_htc_prepare_tx_skb(ep, skb);

        ret = ath10k_skb_map(htc->ar->dev, skb);
        if (ret)
                goto err_credits;

        ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
                                   skb->len, skb);
        if (ret)
                goto err_unmap;

        return 0;

err_unmap:
        ath10k_skb_unmap(htc->ar->dev, skb);
err_credits:
        if (ep->tx_credit_flow_enabled) {
                spin_lock_bh(&htc->tx_lock);
                ep->tx_credits += credits;
                spin_unlock_bh(&htc->tx_lock);

                if (ep->ep_ops.ep_tx_credits)
                        ep->ep_ops.ep_tx_credits(htc->ar);
        }
err_pull:
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
        return ret;
}
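/*
 * ath10k_htc_send() reserves header room with skb_push() and then calls
 * ath10k_htc_prepare_tx_skb() to fill it in. A hedged sketch of what
 * that fill-in plausibly looks like; the ath10k_htc_hdr field names
 * (eid, flags, len) are assumptions for illustration and not confirmed
 * by this excerpt:
 */
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
                                      struct sk_buff *skb)
{
        struct ath10k_htc_hdr *hdr = (struct ath10k_htc_hdr *)skb->data;

        hdr->eid = ep->eid;
        /* payload length excludes the HTC header itself */
        hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
        hdr->flags = 0;
}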
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *txdesc = NULL;
        bool use_frags;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
        u8 tid;
        int prefetch_len, desc_len;
        int msdu_id = -1;
        int res;
        u8 flags0;
        u16 flags1;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        /* Since HTT 3.0 there is no separate mgmt tx command. However, for
         * mgmt tx sent via TX_FRM there is no tx fragment list; the host
         * driver passes the frame pointer directly instead. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);

        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
                goto err_free_txdesc;
        }

        if (use_frags) {
                skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
                skb_cb->htt.pad_len = (unsigned long)msdu->data -
                                      round_down((unsigned long)msdu->data, 4);

                skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
        } else {
                skb_cb->htt.frag_len = 0;
                skb_cb->htt.pad_len = 0;
        }

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err_pull_txfrag;

        if (use_frags) {
                dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
                                        DMA_TO_DEVICE);

                /* tx fragment list must be terminated with zero-entry */
                tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
                tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
                                                  skb_cb->htt.frag_len +
                                                  skb_cb->htt.pad_len);
                tx_frags[0].len = __cpu_to_le32(msdu->len -
                                                skb_cb->htt.frag_len -
                                                skb_cb->htt.pad_len);
                tx_frags[1].paddr = __cpu_to_le32(0);
                tx_frags[1].len = __cpu_to_le32(0);

                dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
                                           DMA_TO_DEVICE);
        }

        ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
                   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);

        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;

        tid = ATH10K_SKB_CB(msdu)->htt.tid;

        ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

        flags0 = 0;
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        if (use_frags)
                flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
        else
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

        flags1 = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0 = flags0;
        cmd->data_tx.flags1 = __cpu_to_le16(flags1);
        cmd->data_tx.len = __cpu_to_le16(msdu->len -
                                         skb_cb->htt.frag_len -
                                         skb_cb->htt.pad_len);
        cmd->data_tx.id = __cpu_to_le16(msdu_id);
        cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
        cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
        skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}
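/*
 * In this reworked path pending_tx[] holds the msdu itself (not a
 * separate tx descriptor), so the HTT tx-completion handler can undo
 * everything the submit path did. A hedged sketch of that unwind --
 * the helper name is hypothetical, and the real driver also fills in
 * tx status flags before handing the frame back to mac80211:
 */
static void ath10k_htt_tx_complete_msdu(struct ath10k_htt *htt, u16 msdu_id)
{
        struct device *dev = htt->ar->dev;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *msdu;

        spin_lock_bh(&htt->tx_lock);
        msdu = htt->pending_tx[msdu_id];
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);

        ath10k_htt_tx_dec_pending(htt);

        skb_cb = ATH10K_SKB_CB(msdu);
        ath10k_skb_unmap(dev, msdu);

        /* strip the fragment list and alignment padding that
         * ath10k_htt_tx() pushed in front of the frame on submit */
        skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);

        ieee80211_tx_status(htt->ar->hw, msdu);
}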
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        txdesc = ath10k_htc_alloc_skb(len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err_free_txdesc;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.frag_len = 0;
        skb_cb->htt.pad_len = 0;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        ath10k_skb_unmap(dev, msdu);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}