void *
wmi_unified_attach(ol_scn_t scn_handle, wma_wow_tx_complete_cbk func)
{
    struct wmi_unified *wmi_handle;
    wmi_handle = (struct wmi_unified *)
        OS_MALLOC(NULL, sizeof(struct wmi_unified), GFP_ATOMIC);
    if (wmi_handle == NULL) {
        printk(KERN_ERR "allocation of WMI handle failed, size %zu\n",
               sizeof(struct wmi_unified));
        return NULL;
    }
    OS_MEMZERO(wmi_handle, sizeof(struct wmi_unified));
    wmi_handle->scn_handle = scn_handle;
    adf_os_atomic_init(&wmi_handle->pending_cmds);
    adf_os_atomic_init(&wmi_handle->is_target_suspended);
#ifdef FEATURE_RUNTIME_PM
    adf_os_atomic_init(&wmi_handle->runtime_pm_inprogress);
#endif
    adf_os_spinlock_init(&wmi_handle->eventq_lock);
    adf_nbuf_queue_init(&wmi_handle->event_queue);
#ifdef CONFIG_CNSS
    cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
#else
    INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
#endif
#ifdef WMI_INTERFACE_EVENT_LOGGING
    adf_os_spinlock_init(&wmi_handle->wmi_record_lock);
#endif
    wmi_handle->wma_wow_tx_complete_cbk = func;
    return wmi_handle;
}
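/*
 * Caller-side sketch (not from the original source): how the returned
 * handle and its pending_cmds counter are typically used. Assumes the
 * adf_os_atomic_inc()/adf_os_atomic_dec() accessors from the same adf
 * layer; my_scn and my_tx_complete_cb are illustrative names only.
 */
static int wmi_attach_example(ol_scn_t my_scn,
                              wma_wow_tx_complete_cbk my_tx_complete_cb)
{
    struct wmi_unified *wmi;

    wmi = (struct wmi_unified *)wmi_unified_attach(my_scn, my_tx_complete_cb);
    if (wmi == NULL)
        return -1;                           /* allocation failed */

    adf_os_atomic_inc(&wmi->pending_cmds);   /* a command goes in flight */
    /* ... build and send the WMI command here ... */
    adf_os_atomic_dec(&wmi->pending_cmds);   /* command completed */
    return 0;
}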
void epping_register_tx_copier(HTC_ENDPOINT_ID eid, epping_context_t *pEpping_ctx)
{
   epping_poll_t *epping_poll = &pEpping_ctx->epping_poll[eid];
   epping_poll->eid = eid;
   epping_poll->arg = pEpping_ctx->epping_adapter;
   epping_poll->done = false;
   EPPING_LOG(VOS_TRACE_LEVEL_FATAL, "%s: eid = %d, arg = %p",
              __func__, eid, pEpping_ctx->epping_adapter);
   sema_init(&epping_poll->sem, 0);
   adf_os_atomic_init(&epping_poll->atm);
   epping_poll->inited = true;
   epping_poll->pid = kthread_create(epping_tx_thread_fn,
                                     epping_poll, EPPING_TX_THREAD);
   if (IS_ERR(epping_poll->pid)) {
      EPPING_LOG(VOS_TRACE_LEVEL_FATAL,
                 "%s: kthread_create failed for eid %d", __func__, eid);
      epping_poll->inited = false;
      return;
   }
   wake_up_process(epping_poll->pid);
}
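/*
 * The thread entry point epping_tx_thread_fn is not shown above; a hedged
 * sketch of the semaphore handshake the sem/done fields suggest, using
 * standard Linux kthread and semaphore APIs. The body is illustrative,
 * not the driver's actual implementation.
 */
static int epping_tx_thread_fn(void *arg)
{
   epping_poll_t *epping_poll = (epping_poll_t *)arg;

   while (!kthread_should_stop()) {
      if (down_interruptible(&epping_poll->sem))
         continue;                   /* interrupted; retry the wait */
      if (epping_poll->done)
         break;                      /* shutdown requested */
      /* ... copy and transmit the queued epping frames here ... */
   }
   return 0;
}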
static void ResetEndpointStates(HTC_TARGET *target)
{
    HTC_ENDPOINT        *pEndpoint;
    int                  i;

    for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
        pEndpoint = &target->EndPoint[i];
        pEndpoint->ServiceID = 0;
        pEndpoint->MaxMsgLength = 0;
        pEndpoint->MaxTxQueueDepth = 0;
        pEndpoint->Id = i;
        INIT_HTC_PACKET_QUEUE(&pEndpoint->TxQueue);
        INIT_HTC_PACKET_QUEUE(&pEndpoint->TxLookupQueue);
        INIT_HTC_PACKET_QUEUE(&pEndpoint->RxBufferHoldQueue);
        pEndpoint->target = target;
        /* credit flow control is forced on for every endpoint; the
         * commented-out original consulted the htc_credit_flow setting */
        pEndpoint->TxCreditFlowEnabled = (A_BOOL)1;
        adf_os_atomic_init(&pEndpoint->TxProcessCount);
    }
}
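/*
 * Why TxProcessCount must start at zero: HTC uses it as a lightweight
 * "single drainer" guard on the endpoint TX queue. A hedged sketch of
 * the inc-then-read entry check, modeled on the idiom the HTC send path
 * uses; the function name here is illustrative only.
 */
static void try_drain_tx_queue(HTC_ENDPOINT *pEndpoint)
{
    adf_os_atomic_inc(&pEndpoint->TxProcessCount);
    if (adf_os_atomic_read(&pEndpoint->TxProcessCount) > 1) {
        /* another context is already draining this endpoint */
        adf_os_atomic_dec(&pEndpoint->TxProcessCount);
        return;
    }
    /* ... dequeue from pEndpoint->TxQueue and hand off to the target ... */
    adf_os_atomic_dec(&pEndpoint->TxProcessCount);
}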
void ol_tx_desc_frame_list_free(
    struct ol_txrx_pdev_t *pdev,
    ol_tx_desc_list *tx_descs,
    int had_error)
{
    struct ol_tx_desc_t *tx_desc, *tmp;
    adf_nbuf_t msdus = NULL;

    TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
        adf_nbuf_t msdu = tx_desc->netbuf;

        adf_os_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
        OL_TX_RESTORE_HDR(tx_desc, msdu); /* restore original hdr offset */
#endif
        adf_nbuf_unmap(pdev->osdev, msdu, ADF_OS_DMA_TO_DEVICE);
        /* free the tx desc */
        ol_tx_desc_free(pdev, tx_desc);
        /* link the netbuf into a list to free as a batch */
        adf_nbuf_set_next(msdu, msdus);
        msdus = msdu;
    }

    /* free the accumulated netbufs as one batch */
    adf_nbuf_tx_free(msdus, had_error);
}
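/*
 * Context for the ref_cnt reset above (a sketch, not driver code): in
 * normal operation the descriptor reference count is taken and released
 * around each use, and adf_os_atomic_init() snaps it back to zero on the
 * free path. Assumes adf_os_atomic_dec_and_test() from the adf layer;
 * the helper names are hypothetical.
 */
static inline void tx_desc_get(struct ol_tx_desc_t *tx_desc)
{
    adf_os_atomic_inc(&tx_desc->ref_cnt);    /* one more user */
}

static inline void tx_desc_put(struct ol_txrx_pdev_t *pdev,
                               struct ol_tx_desc_t *tx_desc)
{
    if (adf_os_atomic_dec_and_test(&tx_desc->ref_cnt))
        ol_tx_desc_free(pdev, tx_desc);      /* last user frees it */
}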
int
htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
    int i, i_int, pool_size;
    uint32_t **p;
    adf_os_dma_addr_t pool_paddr = {0};
    struct htt_tx_desc_page_t *page_info;
    unsigned int num_link = 0;
    uint32_t page_size;

    if (pdev->cfg.is_high_latency) {
        pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
    } else {
        pdev->tx_descs.size =
            /*
             * Start with the size of the base struct
             * that actually gets downloaded.
             */
            sizeof(struct htt_host_tx_desc_t)
            /*
             * Add the fragmentation descriptor elements.
             * Add the most that the OS may deliver, plus one more in
             * case the txrx code adds a prefix fragment (for TSO or
             * audio interworking SNAP header)
             */
            + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8 /* 2 x u_int32_t */
            + 4; /* u_int32_t fragmentation list terminator */
    }

    /*
     * Make sure tx_descs.size is a multiple of 4 bytes.
     * It should be already, but round up just to be sure.
     */
    pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
    pdev->tx_descs.pool_elems = desc_pool_elems;
    pdev->tx_descs.alloc_cnt = 0;

    pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;

    /* Calculate the required page count first */
    page_size = adf_os_mem_get_page_size();
    pdev->num_pages = pool_size / page_size;
    if (pool_size % page_size)
        pdev->num_pages++;

    /* Pack as many descriptors as possible into each page */
    pdev->num_desc_per_page = page_size / pdev->tx_descs.size;

    /* Pages information storage */
    pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
        pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
    if (!pdev->desc_pages) {
        adf_os_print("HTT Attach, desc page alloc fail");
        goto fail1;
    }

    page_info = pdev->desc_pages;
    /* Allocate required memory with multiple pages */
    for (i = 0; i < pdev->num_pages; i++) {
        if (pdev->cfg.is_high_latency) {
            page_info->page_v_addr_start = adf_os_mem_alloc(
                pdev->osdev, page_size);
            page_info->page_p_addr = pool_paddr;
            if (!page_info->page_v_addr_start) {
                for (i_int = 0; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free(page_info->page_v_addr_start);
                }
                goto fail2;
            }
        } else {
            page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
                pdev->osdev,
                page_size,
                &page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            if (!page_info->page_v_addr_start) {
                for (i_int = 0; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free_consistent(
                        pdev->osdev,
                        pdev->num_desc_per_page * pdev->tx_descs.size,
                        page_info->page_v_addr_start,
                        page_info->page_p_addr,
                        adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
                }
                goto fail2;
            }
        }
        page_info->page_v_addr_end = page_info->page_v_addr_start +
            pdev->num_desc_per_page * pdev->tx_descs.size;
        page_info++;
    }

    page_info = pdev->desc_pages;
    pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
    p = (uint32_t **) pdev->tx_descs.freelist;
    for (i = 0; i < pdev->num_pages; i++) {
        for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
            if (i_int == (pdev->num_desc_per_page - 1)) {
                /* Last element on this page; link to the next page */
                if (!page_info->page_v_addr_start) {
                    adf_os_print("freelist overflow, num_link %u\n", num_link);
                    goto fail3;
                }
                page_info++;
                *p = (uint32_t *)page_info->page_v_addr_start;
            } else {
                *p = (uint32_t *)(((char *)p) + pdev->tx_descs.size);
            }
            num_link++;
            p = (uint32_t **) *p;
            /* Last link established; exit */
            if (num_link == (pdev->tx_descs.pool_elems - 1))
                break;
        }
    }
    *p = NULL;

    if (pdev->cfg.is_high_latency) {
        adf_os_atomic_init(&pdev->htt_tx_credit.target_delta);
        adf_os_atomic_init(&pdev->htt_tx_credit.bus_delta);
        adf_os_atomic_add(HTT_MAX_BUS_CREDIT, &pdev->htt_tx_credit.bus_delta);
    }
    return 0; /* success */

fail3:
    if (pdev->cfg.is_high_latency) {
        for (i_int = 0; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free(page_info->page_v_addr_start);
        }
    } else {
        for (i_int = 0; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free_consistent(
                pdev->osdev,
                pdev->num_desc_per_page * pdev->tx_descs.size,
                page_info->page_v_addr_start,
                page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
        }
    }

fail2:
    adf_os_mem_free(pdev->desc_pages);

fail1:
    return -1;
}
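/*
 * How the freelist built above is consumed (a simplified stand-in, not
 * the driver's actual allocator): allocation pops the head and follows
 * the next-free pointer stored in the first word of each free descriptor.
 */
static void *htt_tx_desc_pop(struct htt_pdev_t *pdev)
{
    uint32_t *desc = pdev->tx_descs.freelist;

    if (!desc)
        return NULL;                          /* pool exhausted */
    /* the first word of a free descriptor holds the next-free pointer */
    pdev->tx_descs.freelist = *((uint32_t **)desc);
    pdev->tx_descs.alloc_cnt++;
    return desc;
}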