int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ATH10K_SKB_CB(skb)->htt.is_conf = true;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
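/*
 * The fragment descriptor bank configuration below is apparently a later
 * revision: besides the descriptor banks it also programs the tx queue
 * state block, and it only sets the Q_STATE_VALID bit when the firmware
 * advertises ATH10K_FW_FEATURE_PEER_FLOW_CONTROL.
 */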
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_frag_desc_bank_cfg *cfg;
        int ret, size;
        u8 info;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, size);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

        info = 0;
        info |= SM(htt->tx_q_state.type,
                   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

        if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
                info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

        cfg = &cmd->frag_desc_bank_cfg;
        cfg->info = info;
        cfg->num_banks = 1;
        cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
        cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
        cfg->bank_id[0].bank_min_id = 0;
        cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
                                                    1);

        cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
        cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
        cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
        cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
        cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
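/*
 * The next copy of ath10k_htt_h2t_ver_req_msg() uses athp_buf and the
 * mbuf_skb_*() accessors instead of sk_buff, which suggests it comes from
 * a non-Linux (FreeBSD-style "athp") port of the driver.
 */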
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct athp_buf *skb;
        struct htt_cmd *cmd;
        int len = 0;
        int ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->ver_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        mbuf_skb_put(skb->m, len);
        cmd = (struct htt_cmd *)mbuf_skb_data(skb->m);
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                athp_freebuf(ar, &ar->buf_tx, skb);
                return ret;
        }

        return 0;
}
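/*
 * ath10k_htt_tx_fetch_resp() answers the firmware's tx fetch indication
 * used by push/pull tx flow control: it echoes the token and fetch
 * sequence number back along with the selected fetch records.
 */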
int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
                             __le32 token,
                             __le16 fetch_seq_num,
                             struct htt_tx_fetch_record *records,
                             size_t num_records)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        const u16 resp_id = 0;
        int len = 0;
        int ret;

        /* Response IDs are echoed back only for host driver convenience
         * purposes. They aren't used for anything in the driver yet so use 0.
         */

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->tx_fetch_resp);
        len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
        cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
        cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
        cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
        cmd->tx_fetch_resp.token = token;

        memcpy(cmd->tx_fetch_resp.records, records,
               sizeof(records[0]) * num_records);

        ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
                goto err_free_skb;
        }

        return 0;

err_free_skb:
        dev_kfree_skb_any(skb);
        return ret;
}
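/*
 * Aggregation limits: the caller chooses the maximum number of subframes
 * per A-MPDU (1..64) and per A-MSDU (1..31); values outside the range the
 * firmware accepts are rejected with -EINVAL before any allocation.
 */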
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
                                u8 max_subfrms_amsdu)
{
        struct ath10k *ar = htt->ar;
        struct htt_aggr_conf *aggr_conf;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len;
        int ret;

        /* Firmware defaults are: amsdu = 3 and ampdu = 64 */

        if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
                return -EINVAL;

        if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
                return -EINVAL;

        len = sizeof(cmd->hdr);
        len += sizeof(cmd->aggr_conf);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

        aggr_conf = &cmd->aggr_conf;
        aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
        aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
                   aggr_conf->max_num_amsdu_subframes,
                   aggr_conf->max_num_ampdu_subframes);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
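/*
 * mbuf-backed counterpart of ath10k_htt_send_frag_desc_bank_cfg(): same
 * command, but without the tx queue state fields of the variant above.
 */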
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
        struct ath10k *ar = htt->ar;
        struct athp_buf *skb;
        struct htt_cmd *cmd;
        int ret, size;

        if (!ar->hw_params.continuous_frag_desc)
                return 0;

        if (!htt->frag_desc.paddr) {
                ath10k_warn(ar, "invalid frag desc memory\n");
                return -EINVAL;
        }

        size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
        skb = ath10k_htc_alloc_skb(ar, size);
        if (!skb)
                return -ENOMEM;

        mbuf_skb_put(skb->m, size);
        cmd = (struct htt_cmd *)mbuf_skb_data(skb->m);
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
        cmd->frag_desc_bank_cfg.info = 0;
        cmd->frag_desc_bank_cfg.num_banks = 1;
        cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
        cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
                __cpu_to_le32(htt->frag_desc.paddr);
        cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
        cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
                __cpu_to_le16(htt->max_num_pending_tx - 1);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                            ret);
                athp_freebuf(ar, &ar->buf_tx, skb);
                return ret;
        }

        return 0;
}
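/*
 * Stats request: an 8-bit upload/reset mask plus a 64-bit opaque cookie
 * that the firmware echoes back in its stats confirmation, carried as two
 * little-endian 32-bit words.
 */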
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
        struct ath10k *ar = htt->ar;
        struct htt_stats_req *req;
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        int len = 0, ret;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->stats_req);

        skb = ath10k_htc_alloc_skb(ar, len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

        req = &cmd->stats_req;

        memset(req, 0, sizeof(*req));

        /* currently we support only max 8-bit masks so no need to worry
         * about endianness */
        req->upload_types[0] = mask;
        req->reset_types[0] = mask;
        req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
        req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
        req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                ath10k_warn(ar, "failed to send htt type stats request: %d",
                            ret);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
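/*
 * Early data-tx path: a separate HTC command skb (txdesc) and a DMA-mapped
 * two-entry fragment list skb (txfrag) are allocated per MSDU; both are
 * released by the HTC/HTT completion path via the refcount stored in the
 * tx descriptor's control block.
 */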
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct sk_buff *txfrag = NULL;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        u8 tid;
        int prefetch_len, desc_len, frag_len;
        dma_addr_t frags_paddr;
        int msdu_id = -1;
        int res;
        u8 flags0;
        u16 flags1;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
        frag_len = sizeof(*tx_frags) * 2;

        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        txfrag = dev_alloc_skb(frag_len);
        if (!txfrag) {
                res = -ENOMEM;
                goto err;
        }

        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        /* tx fragment list must be terminated with zero-entry */
        skb_put(txfrag, frag_len);
        tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
        tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        tx_frags[0].len = __cpu_to_le32(msdu->len);
        tx_frags[1].paddr = __cpu_to_le32(0);
        tx_frags[1].len = __cpu_to_le32(0);

        res = ath10k_skb_map(dev, txfrag);
        if (res)
                goto err;

        ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
                   (unsigned long long)ATH10K_SKB_CB(txfrag)->paddr,
                   (unsigned long long)ATH10K_SKB_CB(msdu)->paddr);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
                        txfrag->data, frag_len);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);

        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, desc_len);

        tid = ATH10K_SKB_CB(msdu)->htt.tid;

        ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

        flags0 = 0;
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
        flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

        flags1 = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);

        frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0 = flags0;
        cmd->data_tx.flags1 = __cpu_to_le16(flags1);
        cmd->data_tx.len = __cpu_to_le16(msdu->len);
        cmd->data_tx.id = __cpu_to_le16(msdu_id);
        cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
        cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);

        /* refcount is decremented by HTC and HTT completions until it
         * reaches zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.txfrag = txfrag;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        if (txfrag)
                ath10k_skb_unmap(dev, txfrag);
        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (txfrag)
                dev_kfree_skb_any(txfrag);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        ath10k_skb_unmap(dev, msdu);
        return res;
}
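/*
 * Early management-tx path: the firmware is handed the DMA address of the
 * whole MSDU plus a copy of the first HTT_MGMT_FRM_HDR_DOWNLOAD_LEN bytes
 * of the 802.11 header inside the command itself.
 */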
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                return res;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        txdesc = ath10k_htc_alloc_skb(len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err;
        }

        spin_lock_bh(&htt->tx_lock);
        msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
        if (msdu_id < 0) {
                spin_unlock_bh(&htt->tx_lock);
                res = msdu_id;
                goto err;
        }
        htt->pending_tx[msdu_id] = txdesc;
        spin_unlock_bh(&htt->tx_lock);

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        /* refcount is decremented by HTC and HTT completions until it
         * reaches zero and is freed */
        skb_cb = ATH10K_SKB_CB(txdesc);
        skb_cb->htt.msdu_id = msdu_id;
        skb_cb->htt.refcount = 2;
        skb_cb->htt.msdu = msdu;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err;

        return 0;

err:
        ath10k_skb_unmap(dev, msdu);

        if (txdesc)
                dev_kfree_skb_any(txdesc);
        if (msdu_id >= 0) {
                spin_lock_bh(&htt->tx_lock);
                htt->pending_tx[msdu_id] = NULL;
                ath10k_htt_tx_free_msdu_id(htt, msdu_id);
                spin_unlock_bh(&htt->tx_lock);
        }
        ath10k_htt_tx_dec_pending(htt);
        return res;
}
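/*
 * RX ring setup for LL (low-latency) targets: the host passes the ring
 * base, the shadow copy of the fill index, and the word offsets (byte
 * offset / 4, see desc_offset()) of each struct htt_rx_desc field that
 * the listed flags enable.
 */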
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
        struct sk_buff *skb;
        struct htt_cmd *cmd;
        struct htt_rx_ring_setup_ring *ring;
        const int num_rx_ring = 1;
        u16 flags;
        u32 fw_idx;
        int len;
        int ret;

        /*
         * the HW expects the buffer to be an integral number of 4-byte
         * "words"
         */
        BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
        BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

        len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
            + (sizeof(*ring) * num_rx_ring);
        skb = ath10k_htc_alloc_skb(len);
        if (!skb)
                return -ENOMEM;

        skb_put(skb, len);

        cmd = (struct htt_cmd *)skb->data;
        ring = &cmd->rx_setup.rings[0];

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
        cmd->rx_setup.hdr.num_rings = 1;

        /* FIXME: do we need all of this? */
        flags = 0;
        flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
        flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
        flags |= HTT_RX_RING_FLAGS_PPDU_START;
        flags |= HTT_RX_RING_FLAGS_PPDU_END;
        flags |= HTT_RX_RING_FLAGS_MPDU_START;
        flags |= HTT_RX_RING_FLAGS_MPDU_END;
        flags |= HTT_RX_RING_FLAGS_MSDU_START;
        flags |= HTT_RX_RING_FLAGS_MSDU_END;
        flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
        flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
        flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
        flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
        flags |= HTT_RX_RING_FLAGS_CTRL_RX;
        flags |= HTT_RX_RING_FLAGS_MGMT_RX;
        flags |= HTT_RX_RING_FLAGS_NULL_RX;
        flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

        fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

        ring->fw_idx_shadow_reg_paddr =
                __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
        ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
        ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
        ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
        ring->flags = __cpu_to_le16(flags);
        ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

        ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
        ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
        ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
        ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
        ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
        ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
        ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
        ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
        ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
        ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

        ATH10K_SKB_CB(skb)->htt.is_conf = true;

        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}
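/*
 * The next ath10k_htt_mgmt_tx() revision additionally reserves tailroom
 * for the CCMP MIC on protected action/deauth/disassoc frames (apparently
 * so the firmware can append the MIC) and, when debugfs is enabled,
 * accounts transmitted bytes.
 */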
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
        int len = 0;
        int msdu_id = -1;
        int res;
        int skb_len;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err;

        msdu_id = res;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
        }

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_len = msdu->len;
        skb_cb->paddr = dma_map_single(dev, msdu->data, skb_len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(skb_len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, skb_len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

#ifdef CONFIG_ATH10K_DEBUGFS
        ar->debug.tx_bytes += skb_len;
#endif
        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err:
        return res;
}
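/*
 * Revision that parks the MSDU itself in htt->pending_tx[] (rather than
 * the command skb) and tracks outstanding descriptors with the
 * inc/dec_pending pair.
 */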
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txdesc;

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}
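/*
 * mbuf-backed port of ath10k_htt_mgmt_tx(): the real body is compiled out
 * with #if 0 and the stub just logs "TODO implement", so this part of the
 * port is evidently unfinished.
 */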
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct athp_buf *msdu)
{
#if 0
        struct ath10k *ar = htt->ar;
//      struct device *dev = ar->sc_dev;
        struct athp_buf *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        ATHP_HTT_TX_LOCK(htt);
        /* XXX note: we're specifically trying to store athp_buf's in the idr */
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        ATHP_HTT_TX_UNLOCK(htt);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        /*
         * load/sync happens here for the msdu contents.
         * Then, the command that's allocated below will get
         * load/sync in the HTC layer.
         */
        /* XXX TODO: ADRIAN: figure out what I'm missing! */
        res = athp_dma_mbuf_load(ar, &ar->buf_tx.dh, &msdu->mb, msdu->m);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        /* Ok, we're not modifying the msdu further, so sync here */
        athp_dma_mbuf_pre_xmit(ar, &ar->buf_tx.dh, &msdu->mb);

        mbuf_skb_put(txdesc->m, len);
        cmd = (struct htt_cmd *)mbuf_skb_data(txdesc->m);
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(msdu->mb.paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(mbuf_skb_len(msdu->m));
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, mbuf_skb_data(msdu->m),
               min_t(int, mbuf_skb_len(msdu->m),
                     HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        athp_dma_mbuf_unload(ar, &ar->buf_tx.dh, &msdu->mb);
err_free_txdesc:
        athp_freebuf(ar, &ar->buf_tx, txdesc);
err_free_msdu_id:
        ATHP_HTT_TX_LOCK(htt);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        ATHP_HTT_TX_UNLOCK(htt);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
#else
        device_printf(htt->ar->sc_dev, "%s; TODO implement!\n", __func__);
        return (-EINVAL);
#endif
}
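/*
 * Revision that rate-limits management descriptors: when the hardware
 * declares max_probe_resp_desc_thres, probe responses are accounted
 * separately so they cannot exhaust the tx descriptor pool.
 */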
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct ath10k *ar = htt->ar;
        struct device *dev = ar->dev;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        bool limit_mgmt_desc = false;
        bool is_probe_resp = false;

        if (ar->hw_params.max_probe_resp_desc_thres) {
                limit_mgmt_desc = true;

                if (ieee80211_is_probe_resp(hdr->frame_control))
                        is_probe_resp = true;
        }

        res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
        if (res)
                goto err;

        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
        spin_unlock_bh(&htt->tx_lock);
        if (res < 0)
                goto err_tx_dec;

        msdu_id = res;

        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
        res = dma_mapping_error(dev, skb_cb->paddr);
        if (res) {
                res = -EIO;
                goto err_free_txdesc;
        }

        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
        memset(cmd, 0, len);

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
        cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
        cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

        skb_cb->htt.txbuf = NULL;

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
        return res;
}
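/*
 * HTT >= 3.0 aware data-tx path: management frames go out via TX_FRM with
 * a direct frame pointer (no fragment list), while data frames carry their
 * two-entry fragment list pushed into the skb headroom ahead of the 802.11
 * header.
 */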
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
        struct device *dev = htt->ar->dev;
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *txdesc = NULL;
        bool use_frags;
        u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
        u8 tid;
        int prefetch_len, desc_len;
        int msdu_id = -1;
        int res;
        u8 flags0;
        u16 flags1;

        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
                goto err;

        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt);
        if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
        htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);

        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);

        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
                goto err_free_msdu_id;
        }

        /* Since HTT 3.0 there is no separate mgmt tx command. However in
         * case of mgmt tx using TX_FRM there is no tx fragment list;
         * instead the host driver specifies the frame pointer directly. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);

        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
                goto err_free_txdesc;
        }

        if (use_frags) {
                skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
                skb_cb->htt.pad_len = (unsigned long)msdu->data -
                                      round_down((unsigned long)msdu->data, 4);

                skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
        } else {
                skb_cb->htt.frag_len = 0;
                skb_cb->htt.pad_len = 0;
        }

        res = ath10k_skb_map(dev, msdu);
        if (res)
                goto err_pull_txfrag;

        if (use_frags) {
                dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
                                        DMA_TO_DEVICE);

                /* tx fragment list must be terminated with zero-entry */
                tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
                tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
                                                  skb_cb->htt.frag_len +
                                                  skb_cb->htt.pad_len);
                tx_frags[0].len = __cpu_to_le32(msdu->len -
                                                skb_cb->htt.frag_len -
                                                skb_cb->htt.pad_len);
                tx_frags[1].paddr = __cpu_to_le32(0);
                tx_frags[1].len = __cpu_to_le32(0);

                dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
                                           DMA_TO_DEVICE);
        }

        ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
                   (unsigned long long)ATH10K_SKB_CB(msdu)->paddr);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);

        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;

        tid = ATH10K_SKB_CB(msdu)->htt.tid;

        ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

        flags0 = 0;
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

        if (use_frags)
                flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
        else
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

        flags1 = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0 = flags0;
        cmd->data_tx.flags1 = __cpu_to_le16(flags1);
        cmd->data_tx.len = __cpu_to_le16(msdu->len -
                                         skb_cb->htt.frag_len -
                                         skb_cb->htt.pad_len);
        cmd->data_tx.id = __cpu_to_le16(msdu_id);
        cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
        cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);

        memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
                goto err_unmap_msdu;

        return 0;

err_unmap_msdu:
        ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
        skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
        dev_kfree_skb_any(txdesc);
err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
        spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
err:
        return res;
}