static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt) { struct sk_buff *txdesc; int msdu_id; /* No locks needed. Called after communication with the device has * been stopped. */ for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) { if (!test_bit(msdu_id, htt->used_msdu_ids)) continue; txdesc = htt->pending_tx[msdu_id]; if (!txdesc) continue; ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0) ATH10K_SKB_CB(txdesc)->htt.refcount = 1; ATH10K_SKB_CB(txdesc)->htt.discard = true; ath10k_txrx_tx_unref(htt, txdesc); } }
/* Pending-tx iterator callback: force-complete one msdu as discarded
 * so its tx resources are reclaimed during teardown.
 *
 * @msdu_id: id of the pending frame
 * @skb: the pending skb (unused here; freed via the tx_done path)
 * @ctx: struct ath10k pointer
 *
 * Always returns 0 so iteration continues over all pending entries.
 */
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	/* msdu_id is a plain int; "%hu" mismatched the argument type */
	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}
/* Iterator callback: mark one pending msdu as discarded and run it
 * through the normal unref path so its tx resources are released.
 * Returns 0 to keep the iteration going.
 */
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *pbuf, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n",
		   (unsigned int) msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.discard = 1;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}
/* Pending-tx iterator callback: discard one msdu during teardown.
 *
 * Takes htt->tx_lock around the unref because this variant may run
 * while the completion path's locking contract is still in force.
 * Always returns 0 so iteration visits every pending entry.
 */
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	/* msdu_id is a plain int; "%hu" mismatched the argument type */
	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	spin_lock_bh(&htt->tx_lock);
	ath10k_txrx_tx_unref(htt, &tx_done);
	spin_unlock_bh(&htt->tx_lock);

	return 0;
}
/* Walk the used_msdu_ids bitmap under htt->tx_lock and force-complete
 * every still-pending frame as discarded, releasing its tx resources.
 */
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	spin_lock_bh(&htt->tx_lock);
	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		/* msdu_id is a plain int; "%hu" mismatched the
		 * argument type.
		 */
		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %d\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
	spin_unlock_bh(&htt->tx_lock);
}
/* Force-complete every still-pending tx frame as discarded.
 *
 * No locks needed. Called after communication with the device has
 * been stopped, so no completion can race with this walk.
 */
static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
{
	struct htt_tx_done tx_done = {0};
	int msdu_id;

	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
		if (!test_bit(msdu_id, htt->used_msdu_ids))
			continue;

		/* msdu_id is a plain int; "%hu" mismatched the
		 * argument type.
		 */
		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %d\n",
			   msdu_id);

		tx_done.discard = 1;
		tx_done.msdu_id = msdu_id;

		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}
/* HTC send-completion handler for HTT tx skbs.
 *
 * Configuration frames are not tracked as pending tx, so they are
 * freed directly. Data frames go through the normal unref path; an
 * aborted frame is flagged for discard first.
 */
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (cb->htt.is_conf) {
		dev_kfree_skb_any(skb);
		return;
	}

	if (cb->is_aborted) {
		cb->htt.discard = true;

		/* An aborted skb must be freed by this single unref.
		 * Simply running tx_unref() twice is not an option: if
		 * the htt tx completion already arrived, a second unref
		 * would touch memory that was already released.
		 */
		if (cb->htt.refcount > 1)
			cb->htt.refcount = 1;
	}

	ath10k_txrx_tx_unref(&ar->htt, skb);
}