int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); return 0; } /* Init buffer */ adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = 
(u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; unsigned int tx_buffer_count_pwr2; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; uint16_t idx; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); break; } /* Init buffer */ 
adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } /* * Tx complete ring buffer count should be power of 2. * So, allocated Tx buffer count should be one less than ring buffer size. */ tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1; if (tx_buffer_count > tx_buffer_count_pwr2) { adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d", __func__, tx_buffer_count, tx_buffer_count_pwr2); /* Free over allocated buffers below power of 2 */ for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) { if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) { adf_nbuf_unmap(pdev->osdev, pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx], ADF_OS_DMA_FROM_DEVICE); adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]); } } } if (tx_buffer_count_pwr2 < 0) { adf_os_print("%s: Failed to round down Tx buffer count %d", __func__, tx_buffer_count_pwr2); goto free_tx_comp_base; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, 
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
/**
 * ol_tx_desc_ll() - build a SW+HW TX descriptor pair for an LL frame
 * @pdev: txrx physical device
 * @vdev: txrx virtual device the frame is sent on
 * @netbuf: network buffer to transmit
 * @msdu_info: per-MSDU metadata; vdev_id, checksum-offload and encryption
 *             action fields are filled in here before descriptor init
 *
 * Allocates a SW TX descriptor, initializes the associated HTT HW
 * descriptor, and registers any extra fragments of @netbuf (skipping
 * fragment 0, which is the HTT descriptor prefix added by
 * htt_tx_desc_init()).
 *
 * Return: the initialized descriptor, or NULL if allocation fails.
 */
struct ol_tx_desc_t *
ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
	      struct ol_txrx_vdev_t *vdev,
	      adf_nbuf_t netbuf,
	      struct ol_txrx_msdu_info_t *msdu_info)
{
	struct ol_tx_desc_t *desc;
	u_int32_t frag_cnt;
	unsigned int frag_idx;

	msdu_info->htt.info.vdev_id = vdev->vdev_id;
	msdu_info->htt.action.cksum_offload = adf_nbuf_get_tx_cksum(netbuf);

	/* Derive the encryption action from the buffer's exemption type */
	switch (adf_nbuf_get_exemption_type(netbuf)) {
	case ADF_NBUF_EXEMPT_NO_EXEMPTION:
	case ADF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
		/* this frame must be encrypted */
		msdu_info->htt.action.do_encrypt = 1;
		break;
	case ADF_NBUF_EXEMPT_ALWAYS:
		/* this frame goes out in the clear */
		msdu_info->htt.action.do_encrypt = 0;
		break;
	default:
		adf_os_assert(0);
		break;
	}

	/* allocate the SW descriptor */
	desc = ol_tx_desc_alloc(pdev, vdev);
	if (!desc)
		return NULL;

	desc->netbuf = netbuf;
	/* fix this - get pkt_type from msdu_info */
	desc->pkt_type = ol_tx_frm_std;

	/* initialize the companion HW (HTT) descriptor */
	htt_tx_desc_init(
		pdev->htt_pdev, desc->htt_tx_desc,
		desc->htt_tx_desc_paddr,
		ol_tx_desc_id(pdev, desc),
		netbuf,
		&msdu_info->htt, NULL,
		vdev->opmode == wlan_op_mode_ocb);

	/*
	 * Register the fragments, skipping fragment 0 (the HTT descriptor
	 * prefix added by htt_tx_desc_init above).  At most
	 * CVG_NBUF_MAX_EXTRA_FRAGS fragments are expected.
	 */
	frag_cnt = adf_nbuf_get_num_frags(netbuf);
	if (frag_cnt > CVG_NBUF_MAX_EXTRA_FRAGS)
		frag_cnt = CVG_NBUF_MAX_EXTRA_FRAGS;
	htt_tx_desc_num_frags(pdev->htt_pdev, desc->htt_tx_desc,
		frag_cnt - 1);
	for (frag_idx = 1; frag_idx < frag_cnt; frag_idx++) {
		htt_tx_desc_frag(
			pdev->htt_pdev, desc->htt_tx_desc, frag_idx - 1,
			adf_nbuf_get_frag_paddr_lo(netbuf, frag_idx),
			adf_nbuf_get_frag_len(netbuf, frag_idx));
	}
	return desc;
}