/**
 * htt_tx_get_paddr() - get physical address for htt desc
 * @pdev: htt device instance
 * @target_vaddr: virtual address of a tx descriptor inside the pool
 *
 * Get HTT descriptor physical address from virtual address:
 * find the DMA page containing the address first, then add the
 * offset within that page to the page's physical base.
 *
 * Return: physical address of the descriptor, or 0 when the address
 *         does not fall inside any allocated descriptor page.
 */
static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
				       char *target_vaddr)
{
	uint16_t i;
	struct cdf_mem_dma_page_t *page_info = NULL;
	uint64_t offset;
	uint16_t num_pages = pdev->tx_descs.desc_pages.num_pages;

	for (i = 0; i < num_pages; i++) {
		page_info = pdev->tx_descs.desc_pages.dma_pages + i;
		if (!page_info->page_v_addr_start) {
			/* page table corrupted: missing virtual base */
			cdf_assert(0);
			return 0;
		}
		if ((target_vaddr >= page_info->page_v_addr_start) &&
		    (target_vaddr <= page_info->page_v_addr_end))
			break;
	}

	/*
	 * BUGFIX: the previous check only tested page_info for NULL, which
	 * can only happen when num_pages is 0.  If the loop ran to
	 * completion without a match, page_info still pointed at the last
	 * page and a bogus physical address was returned.  Treat loop
	 * exhaustion (i == num_pages) as "address not found" as well.
	 */
	if (!page_info || i == num_pages) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "invalid page_info");
		return 0;
	}

	offset = (uint64_t)(target_vaddr - page_info->page_v_addr_start);
	return page_info->page_p_addr + offset;
}
/**
 * ol_tx_desc_compute_delay() - record the tx-entry timestamp on a descriptor
 * @tx_desc: tx descriptor being handed out
 *
 * Before stamping, verify the field still holds the 0xffffffff sentinel
 * (presumably written when the descriptor was freed — confirm against the
 * free path); any other value suggests the descriptor is being reused
 * without having been freed, which is logged and asserted on.
 */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (0xffffffff != tx_desc->entry_timestamp_ticks) {
		/* Stale timestamp present: flag the suspicious reuse. */
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
			   __func__, tx_desc->entry_timestamp_ticks);
		cdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = cdf_system_ticks();
}
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc) { if (tx_desc->pkt_type != 0xff) { TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p", __func__, tx_desc->pkt_type, pdev); cdf_assert(0); } if ((uint32_t *) tx_desc->htt_tx_desc < g_dbg_htt_desc_start_addr || (uint32_t *) tx_desc->htt_tx_desc > g_dbg_htt_desc_end_addr) { TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Potential htt_desc curruption:0x%p pdev:0x%p\n", __func__, tx_desc->htt_tx_desc, pdev); cdf_assert(0); } }
/**
 * ol_tx_desc_alloc() - pop a tx descriptor off the pdev freelist
 * @pdev: physical device that owns the tx descriptor pool
 * @vdev: virtual device the descriptor is allocated for (stored in the
 *        descriptor; per-vdev outstanding count incremented when
 *        CONFIG_PER_VDEV_TX_DESC_POOL is enabled)
 *
 * The freelist is popped under pdev->tx_mutex.  When sanity checks are
 * compiled in, a descriptor that does not carry the "freed" markers
 * (pkt_type != ol_tx_frm_freed, or a non-sentinel timestamp when delay
 * accounting is enabled) is reported and asserted on before use.
 *
 * Return: descriptor pointer on success, NULL when the freelist is empty.
 */
static inline struct ol_tx_desc_t *
ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	adf_os_spin_lock_bh(&pdev->tx_mutex);
	if (pdev->tx_desc.freelist) {
		pdev->tx_desc.num_free--;
		tx_desc = &pdev->tx_desc.freelist->tx_desc;
		pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		/* a descriptor coming off the freelist must look freed */
		if (tx_desc->pkt_type != ol_tx_frm_freed
#ifdef QCA_COMPUTE_TX_DELAY
		    || tx_desc->entry_timestamp_ticks != 0xffffffff
#endif
		   ) {
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				   "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
				   __func__, tx_desc->pkt_type, pdev);
#ifdef QCA_COMPUTE_TX_DELAY
			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
				   __func__, tx_desc->entry_timestamp_ticks);
#endif
			adf_os_assert(0);
		}
#endif
	}
	adf_os_spin_unlock_bh(&pdev->tx_mutex);
	if (!tx_desc) {
		/* pool exhausted */
		return NULL;
	}
	tx_desc->vdev = vdev;
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
	adf_os_atomic_inc(&vdev->tx_desc_count);
#endif
	/* stamp allocation time (no-op unless delay accounting enabled) */
	OL_TX_TIMESTAMP_SET(tx_desc);
	return tx_desc;
}
/** * htt_tx_frag_desc_attach() - Attach fragment descriptor * @pdev: htt device instance pointer * @desc_pool_elems: Number of fragment descriptor * * This function will allocate fragment descriptor * * Return: 0 success */ static int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev, uint16_t desc_pool_elems) { pdev->frag_descs.pool_elems = desc_pool_elems; cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->frag_descs.desc_pages, pdev->frag_descs.size, desc_pool_elems, cdf_get_dma_mem_context((&pdev->frag_descs), memctx), false); if ((0 == pdev->frag_descs.desc_pages.num_pages) || (NULL == pdev->frag_descs.desc_pages.dma_pages)) { TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "FRAG descriptor alloc fail"); return -ENOBUFS; } return 0; }
/**
 * ol_rx_pn_check_base() - filter an rx MPDU list through PN replay checks
 * @vdev: virtual device the frames arrived on
 * @peer: transmitting peer, holding per-TID last-PN state
 * @tid: traffic ID used to index the peer's PN bookkeeping
 * @msdu_list: linked list of rx MSDUs, grouped into MPDUs
 *
 * Walks msdu_list MPDU by MPDU, compares each encrypted MPDU's packet
 * number (PN) against the last PN recorded for (peer, tid) using the
 * security-type-specific comparison function, frees MPDUs that fail the
 * check (after notifying the control SW via ol_rx_err), and accumulates
 * the survivors into an output list.
 *
 * Return: NULL-terminated list of MSDUs that passed the PN check.
 */
adf_nbuf_t
ol_rx_pn_check_base(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	union htt_rx_pn_t *last_pn;
	adf_nbuf_t out_list_head = NULL;
	adf_nbuf_t out_list_tail = NULL;
	adf_nbuf_t mpdu;
	int index; /* unicast vs. multicast */
	int pn_len;
	void *rx_desc;
	int last_pn_valid;

	/* Make sure host pn check is not redundant: skip when the firmware
	 * already performs the PN check, or in IBSS mode. */
	if ((adf_os_atomic_read(&peer->fw_pn_check)) ||
	    (vdev->opmode == wlan_op_mode_ibss)) {
		return msdu_list;
	}

	/* First, check whether the PN check applies */
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
	adf_os_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;
	pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
	if (pn_len == 0) {
		/* zero-length PN: this security type carries no PN to check */
		return msdu_list;
	}

	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];
	mpdu = msdu_list;
	while (mpdu) {
		adf_nbuf_t mpdu_tail, next_mpdu;
		union htt_rx_pn_t new_pn;
		int pn_is_replay = 0;

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);
		/*
		 * Find the last MSDU within this MPDU, and
		 * then find the first MSDU within the next MPDU.
		 */
		ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

		/* Don't check the PN replay for non-encrypted frames */
		if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			mpdu = next_mpdu;
			continue;
		}

		/* retrieve PN from rx descriptor */
		htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

		/* if there was no prior PN, there's nothing to check */
		if (last_pn_valid) {
			pn_is_replay =
				pdev->rx_pn[peer->security[index].sec_type].cmp(
					&new_pn, last_pn,
					index == txrx_sec_ucast, vdev->opmode);
		} else {
			last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
		}

		if (pn_is_replay) {
			adf_nbuf_t msdu;
			static u_int32_t last_pncheck_print_time = 0;
			int log_level;
			u_int32_t current_time_ms;

			/*
			 * This MPDU failed the PN check:
			 * 1. Notify the control SW of the PN failure
			 *    (so countermeasures can be taken, if necessary)
			 * 2. Discard all the MSDUs from this MPDU.
			 */
			msdu = mpdu;
			current_time_ms =
				adf_os_ticks_to_msecs(adf_os_ticks());
			/* rate-limit the WARN-level print; log quieter
			 * (INFO2) within the suppression window */
			if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
			    (current_time_ms - last_pncheck_print_time)) {
				last_pncheck_print_time = current_time_ms;
				log_level = TXRX_PRINT_LEVEL_WARN;
			} else {
				log_level = TXRX_PRINT_LEVEL_INFO2;
			}
			TXRX_PRINT(log_level,
				"PN check failed - TID %d, peer %p "
				"(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
				" old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				" new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				" new seq num = %d\n",
				tid, peer,
				peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				(index == txrx_sec_ucast) ? "ucast" : "mcast",
				last_pn->pn128[1], last_pn->pn128[0],
				last_pn->pn128[0] & 0xffffffffffffULL,
				new_pn.pn128[1], new_pn.pn128[0],
				new_pn.pn128[0] & 0xffffffffffffULL,
				htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							 rx_desc));
#if defined(ENABLE_RX_PN_TRACE)
			ol_rx_pn_trace_display(pdev, 1);
#endif /* ENABLE_RX_PN_TRACE */
			ol_rx_err(
				pdev->ctrl_pdev,
				vdev->vdev_id, peer->mac_addr.raw, tid,
				htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
						       rx_desc),
				OL_RX_ERR_PN, mpdu, NULL, 0);
			/* free all MSDUs within this MPDU */
			do {
				adf_nbuf_t next_msdu;
				OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,
						       rx_desc, OL_RX_ERR_PN);
				next_msdu = adf_nbuf_next(msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
				if (msdu == mpdu_tail) {
					break;
				} else {
					msdu = next_msdu;
				}
			} while (1);
		} else {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			/*
			 * Remember the new PN.
			 * For simplicity, just do 2 64-bit word copies to
			 * cover the worst case (WAPI), regardless of the
			 * length of the PN.
			 * This is more efficient than doing a conditional
			 * branch to copy only the relevant portion.
			 */
			last_pn->pn128[0] = new_pn.pn128[0];
			last_pn->pn128[1] = new_pn.pn128[1];
			OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
		}
		mpdu = next_mpdu;
	}
	/* make sure the list is null-terminated */
	if (out_list_tail) {
		adf_nbuf_set_next(out_list_tail, NULL);
	}
	return out_list_head;
}
/**
 * ol_rx_pn_check_base() - filter an rx MPDU list through PN replay checks
 * @vdev: virtual device the frames arrived on
 * @peer: transmitting peer, holding per-TID last/suspect and global PN state
 * @tid: traffic ID used to index the peer's PN bookkeeping
 * @msdu_list: linked list of rx MSDUs, grouped into MPDUs
 *
 * Walks msdu_list MPDU by MPDU and compares each encrypted MPDU's packet
 * number against the last PN recorded for (peer, tid).  For authorized
 * CCMP peers, a large forward jump in PN (> MAX_CCMP_PN_GAP_ERR_CHECK)
 * is not immediately trusted: the PN is parked in suspect_pn and only
 * promoted into last_pn/global_pn once a follow-up frame confirms the
 * new sequence; a PN falling behind global_pn is declared a replay.
 * Replayed MPDUs are reported via ol_rx_err and freed; survivors are
 * returned.
 *
 * BUGFIX: the replay-failure TXRX_PRINT format string was accidentally
 * split by a comma between the " prev seq num" and " new seq num"
 * literals, turning the second literal into the first variadic argument
 * and shifting every subsequent argument off its conversion specifier
 * (undefined behavior in the printf family).  The comma is removed so
 * the two literals concatenate into one format string.
 *
 * Return: NULL-terminated list of MSDUs that passed the PN check.
 */
adf_nbuf_t
ol_rx_pn_check_base(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	union htt_rx_pn_t *last_pn, *global_pn, *suspect_pn;
	adf_nbuf_t out_list_head = NULL;
	adf_nbuf_t out_list_tail = NULL;
	adf_nbuf_t mpdu;
	int index; /* unicast vs. multicast */
	int pn_len;
	void *rx_desc;
	int last_pn_valid;

	/* First, check whether the PN check applies */
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
	adf_os_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;
	pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
	if (pn_len == 0) {
		/* zero-length PN: this security type carries no PN to check */
		return msdu_list;
	}

	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];
	global_pn = &peer->global_pn;
	suspect_pn = &peer->tids_suspect_pn[tid];
	mpdu = msdu_list;
	while (mpdu) {
		adf_nbuf_t mpdu_tail, next_mpdu;
		union htt_rx_pn_t new_pn;
		int pn_is_replay = 0, update_last_pn = 1;
#if ATH_SUPPORT_WAPI
		bool is_mpdu_encrypted = 0;
		bool is_unencrypted_pkt_wai = 0;
#endif

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);
		/*
		 * Find the last MSDU within this MPDU, and
		 * then find the first MSDU within the next MPDU.
		 */
		ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);
#if ATH_SUPPORT_WAPI
		/* Don't check the PN replay for non-encrypted frames
		 * or if this is a WAI packet */
		is_mpdu_encrypted =
			htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc);
		is_unencrypted_pkt_wai = is_mpdu_encrypted ?
			false :
			vdev->osif_check_wai(vdev->osif_vdev, mpdu, mpdu_tail);
		if ((!vdev->drop_unenc && !is_mpdu_encrypted) ||
		    is_unencrypted_pkt_wai) {
#else
		/* Don't check the PN replay for non-encrypted frames */
		if (!vdev->drop_unenc &&
		    !htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
#endif
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			mpdu = next_mpdu;
			continue;
		}

		/* retrieve PN from rx descriptor */
		htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

		/* if there was no prior PN, there's nothing to check */
		if (last_pn_valid) {
			pn_is_replay =
				pdev->rx_pn[peer->security[index].sec_type].cmp(
					&new_pn, last_pn,
					index == txrx_sec_ucast, vdev->opmode);
		} else if (peer->authorize) {
			last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
		}

		/* Extra PN-jump heuristics for authorized CCMP peers */
		if (peer->authorize &&
		    peer->security[index].sec_type == htt_sec_type_aes_ccmp) {
			if ((new_pn.pn48 & 0xffffffffffffULL) >
			    ((last_pn->pn48 + MAX_CCMP_PN_GAP_ERR_CHECK) &
			     0xffffffffffffULL)) {
				/* PN jump wrt last_pn is >
				 * MAX_CCMP_PN_GAP_ERR_CHECK -
				 * PN of current frame is suspected */
				if (suspect_pn->pn48) {
					/* Check whether PN of the current
					 * frame is following prev PN seq */
					if ((new_pn.pn48 & 0xffffffffffffULL) <
					    (suspect_pn->pn48 &
					     0xffffffffffffULL)) {
						/*
						 * PN of curr frame < PN of
						 * prev rxed frame.  As we are
						 * not sure about the prev
						 * suspect PN, compare the
						 * current PN against the
						 * global PN to detect replay.
						 */
						if ((new_pn.pn48 &
						     0xffffffffffffULL) <
						    (global_pn->pn48 &
						     0xffffffffffffULL)) {
							/* Replay violation */
							pn_is_replay = 1;
						} else {
							/* Current PN follows
							 * global PN: mark it
							 * suspected; don't
							 * update last_pn or
							 * global_pn */
							suspect_pn->pn128[0] =
								new_pn.pn128[0];
							suspect_pn->pn128[1] =
								new_pn.pn128[1];
							update_last_pn = 0;
						}
					} else if ((new_pn.pn48 &
						    0xffffffffffffULL) <
						   ((suspect_pn->pn48 +
						     MAX_CCMP_PN_GAP_ERR_CHECK) &
						    0xffffffffffffULL)) {
						/* Current PN follows the prev
						 * suspected PN sequence:
						 * update last_pn & global_pn
						 * (update_last_pn stays 1) */
					} else {
						/*
						 * Current PN follows neither
						 * the prev suspected PN nor
						 * last_pn.  Mark this as the
						 * new suspect; don't update
						 * last_pn & global_pn.
						 */
						suspect_pn->pn128[0] =
							new_pn.pn128[0];
						suspect_pn->pn128[1] =
							new_pn.pn128[1];
						update_last_pn = 0;
					}
				} else {
					/* New jump in PN observed: mark this
					 * PN suspected; don't update
					 * last_pn/global_pn */
					suspect_pn->pn128[0] = new_pn.pn128[0];
					suspect_pn->pn128[1] = new_pn.pn128[1];
					update_last_pn = 0;
				}
			} else {
				/* Valid PN: update last_pn & global_pn
				 * (update_last_pn stays 1) */
			}
		}

		if (pn_is_replay) {
			adf_nbuf_t msdu;

			/*
			 * This MPDU failed the PN check:
			 * 1. Notify the control SW of the PN failure
			 *    (so countermeasures can be taken, if necessary)
			 * 2. Discard all the MSDUs from this MPDU.
			 */
			msdu = mpdu;
			TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
				"PN check failed on offload path- TID %d, peer %p "
				"(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
				" old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				" new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				" global PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				" suspect PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
#if RX_DEBUG
				" htt_status = %d\n"
#endif
				" prev seq num = %d\n"
				" new seq num = %d\n",
				tid, peer,
				peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				(index == txrx_sec_ucast) ? "ucast" : "mcast",
				last_pn->pn128[1], last_pn->pn128[0],
				last_pn->pn128[0] & 0xffffffffffffULL,
				new_pn.pn128[1], new_pn.pn128[0],
				new_pn.pn128[0] & 0xffffffffffffULL,
				global_pn->pn128[1], global_pn->pn128[0],
				global_pn->pn128[0] & 0xffffffffffffULL,
				suspect_pn->pn128[1], suspect_pn->pn128[0],
				suspect_pn->pn128[0] & 0xffffffffffffULL,
#if RX_DEBUG
				htt_rx_mpdu_status(pdev->htt_pdev),
#endif
				peer->tids_last_seq[tid],
				htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							 rx_desc));
			ol_rx_pn_trace_display(pdev, 1);
			ol_rx_err(
				pdev->ctrl_pdev,
				vdev->vdev_id, peer->mac_addr.raw, tid,
				htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
						       rx_desc),
				OL_RX_ERR_PN, mpdu);
			/* free all MSDUs within this MPDU */
			do {
				adf_nbuf_t next_msdu;
				next_msdu = adf_nbuf_next(msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
				if (msdu == mpdu_tail) {
					break;
				} else {
					msdu = next_msdu;
				}
			} while (1);
		} else {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail,
					 mpdu, mpdu_tail);
			if (peer->authorize) {
				/*
				 * Remember the new PN.
				 * For simplicity, just do 2 64-bit word
				 * copies to cover the worst case (WAPI),
				 * regardless of the length of the PN.
				 * This is more efficient than doing a
				 * conditional branch to copy only the
				 * relevant portion.
				 */
				if (update_last_pn) {
					last_pn->pn128[0] = new_pn.pn128[0];
					last_pn->pn128[1] = new_pn.pn128[1];
					global_pn->pn128[0] = new_pn.pn128[0];
					global_pn->pn128[1] = new_pn.pn128[1];
					/* confirmed sequence: clear suspect */
					suspect_pn->pn128[0] = 0;
					suspect_pn->pn128[1] = 0;
				}
				OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
			}
		}
		mpdu = next_mpdu;
	}
	/* make sure the list is null-terminated */
	if (out_list_tail) {
		adf_nbuf_set_next(out_list_tail, NULL);
	}
	return out_list_head;
}

/**
 * ol_rx_pn_check() - PN-check an rx list, then run forwarding checks
 * @vdev: virtual device the frames arrived on
 * @peer: transmitting peer
 * @tid: traffic ID
 * @msdu_list: linked list of rx MSDUs
 *
 * Convenience wrapper: drops replayed MPDUs via ol_rx_pn_check_base()
 * and hands the surviving list to ol_rx_fwd_check().
 */
void
ol_rx_pn_check(
    struct ol_txrx_vdev_t *vdev,
    struct ol_txrx_peer_t *peer,
    unsigned tid,
    adf_nbuf_t msdu_list)
{
	msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
	ol_rx_fwd_check(vdev, peer, tid, msdu_list);
}
/*
 * Porting from Ap11PrepareForwardedPacket.
 * Called when an RX data frame from an associated station is to be
 * forwarded to another associated station: decide whether the frame is
 * suitable for forwarding, and if so rewrite its 802.11 header
 * (ToDS -> FromDS, with the address fields rotated accordingly) so it
 * can be transmitted again.  Frames that don't qualify are left
 * untouched.
 */
static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, adf_nbuf_t msdu)
{
	struct ieee80211_frame *hdr;
	unsigned char addr2_copy[6];
	unsigned char frame_type;
	unsigned char frame_subtype;
	unsigned char from_ds;
	unsigned char to_ds;

	hdr = (struct ieee80211_frame *)(adf_nbuf_data(msdu));
	TXRX_ASSERT1(hdr);

	frame_type = hdr->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	frame_subtype = hdr->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	to_ds = hdr->i_fc[1] & IEEE80211_FC1_DIR_TODS;
	from_ds = hdr->i_fc[1] & IEEE80211_FC1_DIR_FROMDS;

	/*
	 * Only plain (non-QoS) ToDS data frames whose destination (addr3)
	 * is NOT this vdev's own MAC address are rewritten; anything else
	 * goes up through the normal rx path unchanged.
	 */
	if (frame_type != IEEE80211_FC0_TYPE_DATA ||
	    frame_subtype != 0x0 ||
	    ((to_ds != 1) || (from_ds != 0)) ||
	    (adf_os_mem_cmp(hdr->i_addr3, vdev->mac_addr.raw,
			    IEEE80211_ADDR_LEN) == 0)) {
#ifdef DEBUG_HOST_RC
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
			   "Exit: %s | Unnecessary to adjust mac header\n",
			   __func__);
#endif
		return;
	}

	/* Flip the ToDs bit to FromDs */
	hdr->i_fc[1] &= 0xfe;
	hdr->i_fc[1] |= 0x2;
	/*
	 * Rotate the addresses; the copy order matters:
	 * (ToDs, addr1, RA=BSSID) moves to (FrDs, addr2, TA=BSSID)
	 * (ToDs, addr2, SA)       moves to (FrDs, addr3, SA)
	 * (ToDs, addr3, DA)       moves to (FrDs, addr1, DA)
	 */
	memcpy(addr2_copy, hdr->i_addr2, sizeof(addr2_copy));
	memcpy(hdr->i_addr2, hdr->i_addr1, sizeof(addr2_copy));
	memcpy(hdr->i_addr1, hdr->i_addr3, sizeof(addr2_copy));
	memcpy(hdr->i_addr3, addr2_copy, sizeof(addr2_copy));
}
/**
 * htt_tx_attach() - Attach HTT device instance
 * @pdev: htt device instance pointer
 * @desc_pool_elems: Number of TX descriptors
 *
 * This function will allocate HTT TX resources: the multi-page tx
 * descriptor pool, threaded into a singly linked freelist (each free
 * slot's first word points to the next slot), plus the fragment
 * descriptor pool via htt_tx_frag_desc_attach().
 *
 * Return: 0 Success, -ENOBUFS on any allocation failure (tx descriptor
 *         pages are freed before returning).
 */
int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
	int i, i_int, pool_size;
	uint32_t **p;
	struct cdf_mem_dma_page_t *page_info;
	uint32_t num_link = 0;
	uint16_t num_page, num_desc_per_page;

	htt_tx_desc_get_size(pdev);

	/*
	 * Make sure tx_descs.size is a multiple of 4-bytes.
	 * It should be, but round up just to be sure.
	 */
	pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);

	pdev->tx_descs.pool_elems = desc_pool_elems;
	pdev->tx_descs.alloc_cnt = 0;
	/* NOTE(review): pool_size is computed but never used below */
	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
	cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
				  pdev->tx_descs.size,
				  pdev->tx_descs.pool_elems,
				  cdf_get_dma_mem_context((&pdev->tx_descs),
							  memctx), false);
	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
	    (NULL == pdev->tx_descs.desc_pages.dma_pages)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "HTT desc alloc fail");
		goto out_fail;
	}
	num_page = pdev->tx_descs.desc_pages.num_pages;
	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;

	/* link tx descriptors into a freelist */
	page_info = pdev->tx_descs.desc_pages.dma_pages;
	pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
	p = (uint32_t **) pdev->tx_descs.freelist;
	for (i = 0; i < num_page; i++) {
		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
			if (i_int == (num_desc_per_page - 1)) {
				/*
				 * Last element on this page,
				 * should point next page
				 */
				/* NOTE(review): this overflow guard tests
				 * the CURRENT page's base (already known
				 * valid) before the increment, not the next
				 * page it is about to link to — confirm
				 * whether the check should follow the
				 * page_info++ instead */
				if (!page_info->page_v_addr_start) {
					TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
						   "over flow num link %d\n",
						   num_link);
					goto free_htt_desc;
				}
				page_info++;
				*p = (uint32_t *)page_info->page_v_addr_start;
			} else {
				/* next slot is contiguous within the page */
				*p = (uint32_t *)
					(((char *) p) + pdev->tx_descs.size);
			}
			num_link++;
			p = (uint32_t **) *p;
			/* Last link established exit */
			if (num_link == (pdev->tx_descs.pool_elems - 1))
				break;
		}
	}
	/* terminate the freelist */
	*p = NULL;

	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "HTT Frag descriptor alloc fail");
		goto free_htt_desc;
	}

	/* success */
	return 0;

free_htt_desc:
	cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
				 cdf_get_dma_mem_context((&pdev->tx_descs),
							 memctx), false);
out_fail:
	return -ENOBUFS;
}