Example #1
/**
 * @brief Free the RX ring (S/W and H/W): dequeue all the SKBs
 *        and free them starting from the head.
 * NOTE: the NULL terminator doesn't have an SKB.
 *
 * @param osdev  OS device handle
 * @param dma_q  RX DMA ring software context
 */
void    
pci_dma_deinit_rx(adf_os_device_t osdev, pci_dma_softc_t *dma_q) 
{
    a_uint32_t      i, num_desc;
    zdma_swdesc_t  *swdesc;
    adf_nbuf_t      buf;

    num_desc = dma_q->num_desc;
    swdesc = dma_q->sw_ring;

    for (i = 0; i < num_desc; i++, swdesc++) {
        
        pci_zdma_mark_notrdy(swdesc);

        adf_nbuf_unmap(osdev, swdesc->nbuf_map, ADF_OS_DMA_TO_DEVICE);
        
        buf = pci_dma_unlink_buf(osdev, swdesc);
        
        adf_os_assert(buf);
        
        adf_nbuf_free(buf);

        adf_nbuf_dmamap_destroy(osdev, swdesc->nbuf_map);
    }

    pci_dma_free_swdesc(osdev, dma_q, num_desc);
}
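A typical call site (a sketch only; pci_softc_t and its fields are assumed, not taken from the source) tears the ring down on device detach:

/* Hypothetical detach path: release the RX ring after DMA is quiesced. */
static void pci_detach_rx(pci_softc_t *sc)
{
    /* frees every queued SKB, then the descriptor rings themselves */
    pci_dma_deinit_rx(sc->osdev, &sc->rx_dma_q);
}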
Example #2
static void HTT_RX_FRAG_SET_LAST_MSDU(
    struct htt_pdev_t *pdev, adf_nbuf_t msg)
{
    u_int32_t *msg_word;
    unsigned num_msdu_bytes;
    adf_nbuf_t msdu;
    struct htt_host_rx_desc_base *rx_desc;
    int start_idx;
    u_int8_t *p_fw_msdu_rx_desc = 0;

    msg_word = (u_int32_t *) adf_nbuf_data(msg);
    num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(*(msg_word +
               HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
    /*
     * 1 word for the message header,
     * 1 word to specify the number of MSDU bytes,
     * 1 word for every 4 MSDU bytes (round up),
     * 1 word for the MPDU range header
     */
    pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
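    /*
     * Worked example (illustrative numbers): for num_msdu_bytes = 6, the
     * MSDU bytes occupy (6 + 3) >> 2 = 2 words, giving
     * rx_mpdu_range_offset_words = 3 + 2 = 5.
     */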
    pdev->rx_ind_msdu_byte_idx = 0;

    p_fw_msdu_rx_desc = ((u_int8_t *)(msg_word) +
               HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));

    /*
     * Fix for EV126710, in which a BSOD occurred because the last_msdu
     * bit was not set even though the next pointer is deliberately set
     * to NULL before ol_rx_pn_check_base() is called.
     *
     * For fragment frames, the HW may not have set the last_msdu bit
     * in the rx descriptor, but the SW expects this flag to be set,
     * since each fragment is in a separate MPDU. Thus, set the flag here,
     * just in case the HW didn't.
     */
    start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
    msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
    adf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
    adf_nbuf_unmap(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
    rx_desc = htt_rx_desc(msdu);
    *((u_int8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
    rx_desc->msdu_end.last_msdu = 1;
    adf_nbuf_map(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
}
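The unmap/modify/remap sequence above is the general pattern for letting the CPU patch a buffer the device otherwise owns: unmap before reading or writing the descriptor, then remap before handing the buffer back. A minimal sketch of just that pattern, using only the adf calls seen above (the helper name is hypothetical):

/* Sketch: CPU-side fix-up of a DMA-mapped rx buffer. */
static void htt_rx_force_last_msdu(struct htt_pdev_t *pdev, adf_nbuf_t msdu)
{
    struct htt_host_rx_desc_base *rx_desc;

    /* return ownership to the CPU before touching the descriptor */
    adf_nbuf_unmap(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
    rx_desc = htt_rx_desc(msdu);
    rx_desc->msdu_end.last_msdu = 1;
    /* hand the buffer back to the device */
    adf_nbuf_map(pdev->osdev, msdu, ADF_OS_DMA_FROM_DEVICE);
}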
Example #3
void ol_tx_desc_frame_list_free(
    struct ol_txrx_pdev_t *pdev,
    ol_tx_desc_list *tx_descs,
    int had_error)
{
    struct ol_tx_desc_t *tx_desc, *tmp;
    adf_nbuf_t msdus = NULL;

    TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
        adf_nbuf_t msdu = tx_desc->netbuf;

        adf_os_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        OL_TX_RESTORE_HDR(tx_desc, msdu); /* restore original hdr offset */
#endif
        adf_nbuf_unmap(pdev->osdev, msdu, ADF_OS_DMA_TO_DEVICE);
        /* free the tx desc */
        ol_tx_desc_free(pdev, tx_desc);
        /* link the netbuf into a list to free as a batch */
        adf_nbuf_set_next(msdu, msdus);
        msdus = msdu;
    }

    /*
     * The tail of this example was cut off in the source; the collected
     * netbufs still need to be freed as a batch. adf_nbuf_tx_free()
     * (assumed available, taking the list head and an error flag) is a
     * best-effort completion.
     */
    adf_nbuf_tx_free(msdus, had_error);
}
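A sketch of a call site, assuming ol_tx_desc_list is the TAILQ head type that the function's TAILQ_FOREACH_SAFE usage implies:

/* Hypothetical caller: collect descriptors, then free them as a batch. */
static void ol_tx_flush_one(struct ol_txrx_pdev_t *pdev,
                            struct ol_tx_desc_t *tx_desc)
{
    ol_tx_desc_list tx_descs;          /* TAILQ head type from the source */

    TAILQ_INIT(&tx_descs);
    TAILQ_INSERT_TAIL(&tx_descs, tx_desc, tx_desc_list_elem);
    ol_tx_desc_frame_list_free(pdev, &tx_descs, 1 /* had_error */);
}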
Example #4
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
   u_int16_t idx;

   if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   }

   if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
     adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   }

   /* Free each single buffer */
   for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
      if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
         /* unmap with the direction used at map time (BIDIRECTIONAL) */
         adf_nbuf_unmap(pdev->osdev,
            pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
            ADF_OS_DMA_BIDIRECTIONAL);
         adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
      }
   }

   /* Free storage */
   adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

   return 0;
}
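Detach is the mirror of htt_tx_ipa_uc_attach() in the next example; a sketch of the expected pairing (the size, count, and partition values are purely illustrative):

/* Illustrative init/teardown pairing; the constants are made up. */
if (htt_tx_ipa_uc_attach(pdev, 2048 /* buf size */,
                         1024 /* buf cnt */, 0 /* partition base */))
    return -1;   /* attach cleans up its own partial allocations */
/* ... IPA uC tx path in service ... */
htt_tx_ipa_uc_detach(pdev);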
Example #5
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   unsigned int  tx_buffer_count_pwr2;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;
   u_int16_t     idx;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr) {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;
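      /*
       * Per-buffer header layout as written above:
       *   word 0: HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT
       *   word 1: (uc_tx_partition_base + buffer index) in the upper
       *           16 bits, lower 16 bits left zero
       *   word 2: payload physical address (buffer_paddr + 16)
       *   word 3: 0xFFFFFFFF marker
       *   word 4: fragment header physical address (buffer_paddr + 32)
       */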

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /*
       * A memory barrier belongs here so the ring entry written above is
       * visible to the device before the ring pointer advances; the
       * barrier call itself is elided in this example.
       */

      ring_vaddr++;
   }

   /*
    * Tx complete ring buffer count should be power of 2.
    * So, allocated Tx buffer count should be one less than ring buffer size.
    */
   tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
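   /*
    * Illustrative numbers: tx_buffer_count = 99 gives
    * vos_rounddown_pow_of_two(100) = 64, i.e. 63 usable buffers; the
    * 36 extra ones are freed below.
    */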
   if (tx_buffer_count > tx_buffer_count_pwr2) {
       adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
                   __func__, tx_buffer_count, tx_buffer_count_pwr2);

       /* Free the over-allocated buffers above the power-of-2 count */
       for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
           if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
               /* unmap with the direction used at map time (BIDIRECTIONAL) */
               adf_nbuf_unmap(pdev->osdev,
                   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                   ADF_OS_DMA_BIDIRECTIONAL);
               adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
           }
       }
   }

   if (tx_buffer_count_pwr2 == 0) {
       /* tx_buffer_count_pwr2 is unsigned and can never be negative;
        * zero usable buffers is the failure case */
       adf_os_print("%s: No Tx buffers left after power-of-2 rounding",
                   __func__);
       return_code = -4;
       goto free_tx_comp_base;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}