Example #1
0
int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
{
	uint16_t idx;

	if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
		cdf_os_mem_free_consistent(
			pdev->osdev,
			4,
			pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
			pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
			cdf_get_dma_mem_context(
				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
				memctx));
	}

	if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
		cdf_os_mem_free_consistent(
			pdev->osdev,
			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
			pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
			pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
						 tx_comp_base),
						memctx));
	}

	/* Free each allocated Tx buffer */
	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
			cdf_nbuf_unmap(pdev->osdev,
				       pdev->ipa_uc_tx_rsc.
				       tx_buf_pool_vaddr_strg[idx],
				       CDF_DMA_FROM_DEVICE);
			cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
				      tx_buf_pool_vaddr_strg[idx]);
		}
	}

	/* Free storage */
	cdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);

	return 0;
}
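
For reference, here is a minimal, self-contained sketch of the same teardown pattern using standard C allocators: each buffer is unmapped before it is freed, and the pointer-storage array is released last. The buf_pool struct and unmap_buf() stub are illustrative stand-ins, not driver APIs.

#include <stdlib.h>

/* Hypothetical pool bookkeeping mirroring ipa_uc_tx_rsc */
struct buf_pool {
	void **vaddr_strg;      /* per-buffer virtual addresses */
	unsigned int alloc_cnt; /* number of buffers actually allocated */
};

/* Stand-in for cdf_nbuf_unmap(); a real driver would DMA-unmap here */
static void unmap_buf(void *buf)
{
	(void)buf;
}

static void pool_detach(struct buf_pool *pool)
{
	unsigned int idx;

	/* Free each allocated buffer: unmap first, then free */
	for (idx = 0; idx < pool->alloc_cnt; idx++) {
		if (pool->vaddr_strg[idx]) {
			unmap_buf(pool->vaddr_strg[idx]);
			free(pool->vaddr_strg[idx]);
			pool->vaddr_strg[idx] = NULL;
		}
	}

	/* Free the pointer-storage array itself */
	free(pool->vaddr_strg);
	pool->vaddr_strg = NULL;
}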
Example #2
0
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   unsigned int  tx_buffer_count_pwr2;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;
   uint16_t     idx;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate as many TX buffers as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr) {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   /*
    * The Tx completion ring size must be a power of 2, so the usable
    * Tx buffer count is one less than that ring size.
    */
   tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
   if (tx_buffer_count > tx_buffer_count_pwr2) {
       adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
                    __func__, tx_buffer_count, tx_buffer_count_pwr2);

       /* Free the buffers allocated beyond the rounded-down count */
       for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
           if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
               adf_nbuf_unmap(pdev->osdev,
                   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                   ADF_OS_DMA_FROM_DEVICE);
               adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
           }
       }
   }

   /* Fail if rounding left no usable Tx buffers */
   if (tx_buffer_count_pwr2 == 0) {
       adf_os_print("%s: No usable Tx buffers after rounding down",
                    __func__);
       return_code = -4;
       adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
       goto free_tx_comp_base;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
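
The rounding step above relies on vos_rounddown_pow_of_two(). A self-contained illustration of the same arithmetic, with a stand-in helper (an assumption, not the VOS implementation):

#include <stdio.h>

/* Stand-in for vos_rounddown_pow_of_two(): largest power of 2 <= n */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
   unsigned int p = 1;

   if (n == 0)
      return 0;
   while ((p << 1) != 0 && (p << 1) <= n)
      p <<= 1;
   return p;
}

int main(void)
{
   unsigned int tx_buffer_count = 100;  /* e.g. 100 buffers were allocated */
   unsigned int usable;

   /* ring size = rounddown_pow_of_two(count + 1); usable = ring size - 1 */
   usable = rounddown_pow_of_two(tx_buffer_count + 1) - 1;
   printf("allocated %u, usable %u\n", tx_buffer_count, usable); /* 100, 63 */
   return 0;
}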
Example #3
0
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate as many TX buffers as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr) {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
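
The loop above writes a five-word header into each pre-zeroed Tx buffer. A standalone sketch of that layout follows; the placeholder value and the wrapper function are illustrative, while the offsets 16 and 32 come straight from the example:

#include <stdint.h>

/* Placeholder; the real value comes from the HTT headers */
#define HTT_TX_HEADER_DEFAULT 0x0

/* Illustrative wrapper: hdr points at the start of a pre-zeroed buffer */
static void fill_tx_buf_header(uint32_t *hdr, uint32_t buffer_paddr,
                               unsigned int pkt_id)
{
   hdr[0] = HTT_TX_HEADER_DEFAULT;               /* HTT control header */
   hdr[1] = ((uint32_t)(uint16_t)pkt_id) << 16;  /* PKT ID, high 16 bits */
   hdr[2] = buffer_paddr + 16;                   /* frag descriptor pointer */
   hdr[3] = 0xFFFFFFFF;                          /* chanreq, peerid */
   hdr[4] = buffer_paddr + 32;                   /* FRAG header pointer */
}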
Example #4
0
/**
 * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
 * @pdev: htt context
 * @uc_tx_buf_sz: single tx buffer size
 * @uc_tx_buf_cnt: total tx buffer count
 * @uc_tx_partition_base: tx buffer partition start
 *
 * Return: 0 on success, -ENOBUFS on memory allocation failure
 */
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
			 unsigned int uc_tx_buf_sz,
			 unsigned int uc_tx_buf_cnt,
			 unsigned int uc_tx_partition_base)
{
	unsigned int tx_buffer_count;
	cdf_nbuf_t buffer_vaddr;
	cdf_dma_addr_t buffer_paddr;
	uint32_t *header_ptr;
	uint32_t *ring_vaddr;
	int return_code = 0;
	unsigned int tx_comp_ring_size;

	/* Allocate CE Write Index WORD */
	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			4,
			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
			cdf_get_dma_mem_context(
				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
				memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
		return -ENOBUFS;
	}

	/* Allocate TX COMP Ring */
	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
		cdf_os_mem_alloc_consistent(
			pdev->osdev,
			tx_comp_ring_size,
			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
						 tx_comp_base),
						memctx));
	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
		cdf_print("%s: TX COMP ring alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_ce_idx;
	}

	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);

	/* Allocate TX BUF vAddress Storage */
	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
					      sizeof(cdf_nbuf_t));
	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
		return_code = -ENOBUFS;
		goto free_tx_comp_base;
	}
	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));

	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
	/* Allocate as many TX buffers as possible */
	for (tx_buffer_count = 0;
	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
					      uc_tx_buf_sz, 0, 4, false);
		if (!buffer_vaddr) {
			cdf_print("%s: TX BUF alloc fail, loop index: %d",
				  __func__, tx_buffer_count);
			break;
		}

		/* Init buffer */
		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);

		/* HTT control header */
		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
		header_ptr++;

		/* PKT ID */
		*header_ptr |= ((uint16_t) uc_tx_partition_base +
				tx_buffer_count) << 16;

		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
		header_ptr++;

		/* Frag Desc Pointer */
		/* 64bits descriptor, Low 32bits */
		*header_ptr = (uint32_t) (buffer_paddr + 20);
		header_ptr++;

		/* 64bits descriptor, high 32bits */
		*header_ptr = 0;
		header_ptr++;

		/* chanreq, peerid */
		*header_ptr = 0xFFFFFFFF;
		header_ptr++;

		/* FRAG Header */
		/* 6 words TSO header */
		header_ptr += 6;
		*header_ptr = buffer_paddr + 64;

		*ring_vaddr = buffer_paddr;
		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
			buffer_vaddr;
		/* Memory barrier to ensure actual value updated */

		ring_vaddr += 2;
	}

	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	return 0;

free_tx_comp_base:
	cdf_os_mem_free_consistent(pdev->osdev,
				   tx_comp_ring_size,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_comp_base),
							   memctx));
free_tx_ce_idx:
	cdf_os_mem_free_consistent(pdev->osdev,
				   4,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
				   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
				   cdf_get_dma_mem_context((&pdev->
							    ipa_uc_tx_rsc.
							    tx_ce_idx),
							   memctx));
	return return_code;
}
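
This variant writes a wider header: a 64-bit frag-descriptor pointer (low word, then zeroed high word), the chanreq/peerid word, and a 6-word TSO header region before the FRAG header pointer, with ring entries advanced two words at a time. A standalone sketch of the header layout; offsets 20 and 64 are from the example, the wrapper is illustrative:

#include <stdint.h>

/* Placeholder; the real value comes from the HTT headers */
#define HTT_TX_HEADER_DEFAULT 0x0

/* Illustrative wrapper: hdr points at the start of a pre-zeroed buffer */
static void fill_tx_buf_header_64(uint32_t *hdr, uint32_t buffer_paddr,
				  unsigned int pkt_id)
{
	hdr[0] = HTT_TX_HEADER_DEFAULT;               /* HTT control header */
	hdr[1] = ((uint32_t)(uint16_t)pkt_id) << 16;  /* PKT ID */
	hdr[2] = buffer_paddr + 20;                   /* frag desc, low 32 bits */
	hdr[3] = 0;                                   /* frag desc, high 32 bits */
	hdr[4] = 0xFFFFFFFF;                          /* chanreq, peerid */
	/* hdr[5]..hdr[10]: 6-word TSO header region, stays zeroed */
	hdr[11] = buffer_paddr + 64;                  /* FRAG header pointer */
}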
Example #5
0
int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
{
    struct htt_htc_pkt *pkt;
    adf_nbuf_t msg;
    u_int32_t *msg_word;

    pkt = htt_htc_pkt_alloc(pdev);
    if (!pkt) {
        return A_NO_MEMORY;
    }

    /* show that this is not a tx frame download (not required, but helpful) */
    pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
    pkt->pdev_ctxt = NULL; /* not used during send-done callback */

    msg = adf_nbuf_alloc(
        pdev->osdev,
        HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
        /* reserve room for HTC header */
        HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
    if (!msg) {
        htt_htc_pkt_free(pdev, pkt);
        return A_NO_MEMORY;
    }
    /* set the length of the message */
    adf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);

    /* fill in the message contents */
    msg_word = (u_int32_t *) adf_nbuf_data(msg);

    /* rewind beyond alignment pad to get to the HTC header reserved area */
    adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

    *msg_word = 0;
    HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
         pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
    HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
        (unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
        (unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);

    msg_word++;
    *msg_word = 0;
    HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word,
        (unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);

    SET_HTC_PACKET_INFO_TX(
        &pkt->htc_pkt,
        htt_h2t_send_complete_free_netbuf,
        adf_nbuf_data(msg),
        adf_nbuf_len(msg),
        pdev->htc_endpoint,
        HTC_TX_PACKET_TAG_RUNTIME_PUT);

    SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
    if (HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
        htt_htc_misc_pkt_list_add(pdev, pkt);
#else
    HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

    return A_OK;
}
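
The HTT_WDI_IPA_CFG_*_SET helpers above follow the usual HTT shift-and-mask pattern: zero the word, then OR each field into its bit position. A self-contained illustration with made-up field positions (the real masks and shifts live in the HTT interface headers):

#include <stdint.h>

/* Example field occupying bits 16..31; mask/shift values are illustrative */
#define EXAMPLE_FIELD_MASK  0xffff0000
#define EXAMPLE_FIELD_SHIFT 16

#define EXAMPLE_FIELD_SET(word, value) \
    ((word) |= (((uint32_t)(value) << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK))

/* Usage mirrors the message building above: zero, then OR fields in */
static uint32_t build_example_cfg_word(unsigned int pool_size)
{
    uint32_t msg_word = 0;

    EXAMPLE_FIELD_SET(msg_word, pool_size);
    return msg_word;
}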