LOCAL void _buf_pool_static_create_pool(pool_handle_t handle, BUF_POOL_ID poolId, int nItems, int nSize)
{
    int i;
    VBUF *buf;
    VDESC *desc;
    
    //BUF_POOL_STATIC_CONTEXT *ctx = (BUF_POOL_STATIC_CONTEXT *)handle;
    
    for ( i = 0; i < nItems; i++) {
        buf = VBUF_alloc_vbuf();
        desc = VDESC_alloc_vdesc();

        desc->buf_addr = (A_UINT8 *)adf_os_mem_alloc(nSize);
        desc->buf_size = nSize;
        desc->data_offset = 0;
        desc->data_size = 0;
        
        buf->buf_length = 0;        
        buf->desc_list = desc;
        
        /* Prepend to the per-pool free list; explicitly NULL-terminate the
         * first entry so the list can be walked safely. */
        buf->next_buf = g_poolCtx.bufQ[poolId];
        g_poolCtx.bufQ[poolId] = buf;
    }
}
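
A hypothetical helper (not part of the original listing) that pops one VBUF off the per-pool list built above; it relies only on the g_poolCtx.bufQ array and the VBUF next_buf link already used by the create routine:

LOCAL VBUF *_buf_pool_static_get_buf(BUF_POOL_ID poolId)
{
    /* Take the first buffer off the singly linked free list, if any */
    VBUF *buf = g_poolCtx.bufQ[poolId];

    if (buf != NULL) {
        g_poolCtx.bufQ[poolId] = buf->next_buf;
        buf->next_buf = NULL;
    }

    return buf;
}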
Example #2
int 
tx99_attach(struct ath_softc *sc)
{
    struct ath_tx99 *tx99 = sc->sc_tx99;

    if (tx99 != NULL) {
        adf_os_print("%s: sc_tx99 was not NULL\n", __FUNCTION__);
        return EINVAL;
    }
    tx99 = adf_os_mem_alloc(NULL, sizeof(struct ath_tx99));
    if (tx99 == NULL) {
        adf_os_print("%s: no memory for tx99 attach\n", __FUNCTION__);
        return ENOMEM;
    }
    adf_os_mem_set(tx99, 0, sizeof(struct ath_tx99));
    sc->sc_tx99 = tx99;
    
    tx99->stop = tx99_stop;
    tx99->start = tx99_start;
    tx99->tx99_state = 0;
    tx99->txpower = 60;
    tx99->txrate = 300000;
    tx99->txfreq = 6;/* ieee channel number */
    tx99->txmode = IEEE80211_MODE_11NG_HT40PLUS;
    tx99->phymode = IEEE80211_MODE_AUTO;
    tx99->chanmask = 1;
    tx99->recv = 0;
    tx99->testmode = TX99_TESTMODE_TX_PN;
    
    return EOK;
}	
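
A matching detach is not shown in this listing; a minimal sketch, assuming the context only needs to be unhooked from the softc and released, could look like this:

void
tx99_detach(struct ath_softc *sc)
{
    struct ath_tx99 *tx99 = sc->sc_tx99;

    if (tx99 == NULL)
        return;

    /* Unhook first so nothing else reaches the freed context */
    sc->sc_tx99 = NULL;
    adf_os_mem_free(tx99);
}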
void _DMAengine_config_rx_queue(struct zsDmaQueue *q, int num_desc, int buf_size)
{
    int i;
    VDESC *desc;
    VDESC *head = NULL;
        
    for(i=0; i < num_desc; i++)
    {
        desc = VDESC_alloc_vdesc();
        
        adf_os_assert(desc != NULL);
        
        desc->buf_addr = (A_UINT8 *)adf_os_mem_alloc(buf_size);
        desc->buf_size = buf_size;
        desc->next_desc = NULL;
        desc->data_offset = 0;
        desc->data_size = 0;
        desc->control = 0;
               
        if ( head == NULL )
        {
            head = desc;
        }
        else
        {
            desc->next_desc = head;
            head = desc;
        }
    }         
    
    config_queue(q, head);          
}
Example #4
void
ol_rx_addba_handler(
    ol_txrx_pdev_handle pdev,
    u_int16_t peer_id,
    u_int8_t tid,
    u_int8_t win_sz)
{
    unsigned round_pwr2_win_sz, array_size;
    struct ol_txrx_peer_t *peer;
    struct ol_rx_reorder_t *rx_reorder;

    peer = ol_txrx_peer_find_by_id(pdev, peer_id);
    if (peer == NULL) {
        return;
    }
    rx_reorder = &peer->tids_rx_reorder[tid];

    TXRX_ASSERT2(win_sz <= 64);
    round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
    array_size = round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
    rx_reorder->array = adf_os_mem_alloc(pdev->osdev, array_size);
    TXRX_ASSERT1(rx_reorder->array);
    adf_os_mem_set(rx_reorder->array, 0x0, array_size);

    rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
}
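
The reorder array is sized to the window rounded up to a power of two, so win_sz_mask can be used for cheap index wrapping. The OL_RX_REORDER_ROUND_PWR2 macro itself is not shown in this listing; an illustrative equivalent (an assumption, not the driver's definition) is:

static unsigned ol_rx_reorder_round_pwr2_sketch(unsigned win_sz)
{
    unsigned v = 1;

    /* Smallest power of two >= win_sz, e.g. 48 -> 64 (mask 63) */
    while (v < win_sz)
        v <<= 1;

    return v;
}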
Example #5
ieee80211_resmgr_t ol_resmgr_create(struct ieee80211com *ic, ieee80211_resmgr_mode mode)
{
    struct ol_ath_softc_net80211 *scn = OL_ATH_SOFTC_NET80211(ic);
    ieee80211_resmgr_t    resmgr;

printk("OL Resmgr Init-ed\n");

    if (ic->ic_resmgr) {
        printk("%s : ResMgr already exists \n", __func__); 
        return NULL;
    }

    /* Allocate ResMgr data structures */
    resmgr = (ieee80211_resmgr_t) adf_os_mem_alloc(scn->adf_dev, 
                                            sizeof(struct ieee80211_resmgr));
    if (resmgr == NULL) {
        printk("%s : ResMgr memory alloction failed\n", __func__); 
        return NULL;
    }

    OS_MEMZERO(resmgr, sizeof(struct ieee80211_resmgr));
    resmgr->ic = ic;
    resmgr->mode = mode;
    /* Indicate the device is capable of multi-chan operation*/
    ic->ic_caps_ext |= IEEE80211_CEXT_MULTICHAN;

    /* initialize function pointer table */
    resmgr->resmgr_func_table.resmgr_create_complete = _ieee80211_resmgr_create_complete; 
    resmgr->resmgr_func_table.resmgr_delete = _ieee80211_resmgr_delete;
    resmgr->resmgr_func_table.resmgr_delete_prepare = _ieee80211_resmgr_delete_prepare; 

    resmgr->resmgr_func_table.resmgr_register_notification_handler = _ieee80211_resmgr_register_notification_handler; 
    resmgr->resmgr_func_table.resmgr_unregister_notification_handler = _ieee80211_resmgr_unregister_notification_handler; 

    resmgr->resmgr_func_table.resmgr_request_offchan = _ieee80211_resmgr_request_offchan; 
    resmgr->resmgr_func_table.resmgr_request_bsschan = _ieee80211_resmgr_request_bsschan; 
    resmgr->resmgr_func_table.resmgr_request_chanswitch = _ieee80211_resmgr_request_chanswitch; 

    resmgr->resmgr_func_table.resmgr_vap_start = _ieee80211_resmgr_vap_start; 

    resmgr->resmgr_func_table.resmgr_vattach = _ieee80211_resmgr_vattach; 
    resmgr->resmgr_func_table.resmgr_vdetach = _ieee80211_resmgr_vdetach; 
    resmgr->resmgr_func_table.resmgr_get_notification_type_name = _ieee80211_resmgr_get_notification_type_name; 
    resmgr->resmgr_func_table.resmgr_register_noa_event_handler = _ieee80211_resmgr_register_noa_event_handler; 
    resmgr->resmgr_func_table.resmgr_unregister_noa_event_handler = _ieee80211_resmgr_unregister_noa_event_handler; 
    resmgr->resmgr_func_table.resmgr_off_chan_sched_set_air_time_limit = _ieee80211_resmgr_off_chan_sched_set_air_time_limit; 
    resmgr->resmgr_func_table.resmgr_off_chan_sched_get_air_time_limit = _ieee80211_resmgr_off_chan_sched_get_air_time_limit; 

    resmgr->resmgr_func_table.resmgr_vap_stop = _ieee80211_resmgr_vap_stop; 

    adf_os_spinlock_init(&resmgr->rm_lock);
    adf_os_spinlock_init(&resmgr->rm_handler_lock);

    /* Register WMI event handlers */
    wmi_unified_register_event_handler(scn->wmi_handle, WMI_VDEV_START_RESP_EVENTID,
                                        ol_vdev_wmi_event_handler, resmgr);

    return resmgr;
}
LOCAL pool_handle_t _buf_pool_dynamic_init(adf_os_handle_t handle)
{
    BUF_POOL_DYNAMIC_CONTEXT *ctx;
    
    ctx = (BUF_POOL_DYNAMIC_CONTEXT *)adf_os_mem_alloc(sizeof(BUF_POOL_DYNAMIC_CONTEXT));
    if (ctx == NULL) {
        return NULL;
    }
    ctx->OSHandle = handle;
    
    return ctx; 
}      
/*
 * Allocate/free memory.
 */
void * __ahdecl
ath_hal_malloc(adf_os_size_t size)
{
	void *p;

	p = adf_os_mem_alloc(size);
	if (p)
		adf_os_mem_zero(p, size);

	return p;
}
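
The comment above covers both allocation and free, but only the malloc side is included in this listing. A presumed counterpart, sketched on the assumption that it simply wraps adf_os_mem_free:

void __ahdecl
ath_hal_free(void *p)
{
	adf_os_mem_free(p);
}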
Example #8
ol_pdev_handle ol_pdev_cfg_attach(adf_os_device_t osdev,
                                   struct txrx_pdev_cfg_param_t cfg_param)
{
	struct txrx_pdev_cfg_t *cfg_ctx;

	cfg_ctx = adf_os_mem_alloc(osdev, sizeof(*cfg_ctx));
	if (!cfg_ctx) {
		printk(KERN_ERR "cfg ctx allocation failed\n");
		return NULL;
	}

#ifdef CONFIG_HL_SUPPORT
	cfg_ctx->is_high_latency = 1;
	/* 802.1Q and SNAP / LLC headers are accounted for elsewhere */
	cfg_ctx->tx_download_size = 1500;
	cfg_ctx->tx_free_at_download = 0;
#else
	/*
	 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
	 * Include payload, up to the end of UDP header for IPv4 case
	 */
	cfg_ctx->tx_download_size = 16;
#endif
	/* temporarily disabled PN check for Riva/Pronto */
	cfg_ctx->rx_pn_check = 1;
#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
	cfg_ctx->defrag_timeout_check = 1;
#else
	cfg_ctx->defrag_timeout_check = 0;
#endif
	cfg_ctx->max_peer_id = 511;
	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
	cfg_ctx->pn_rx_fwd_check = 1;
	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
	cfg_ctx->max_thruput_mbps = 800;
	cfg_ctx->max_nbuf_frags = 1;
	cfg_ctx->vow_config = vow_config;
	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
	cfg_ctx->throttle_period_ms = 40;
	cfg_ctx->rx_fwd_disabled = 0;
	cfg_ctx->is_packet_log_enabled = 0;
	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
#ifdef IPA_UC_OFFLOAD
	cfg_ctx->ipa_uc_rsc.uc_offload_enabled = cfg_param.is_uc_offload_enabled;
	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size = cfg_param.uc_rx_indication_ring_count;
	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
#endif /* IPA_UC_OFFLOAD */
	return (ol_pdev_handle) cfg_ctx;
}
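
An illustrative call site (the field value and wrapper name are assumptions, used only to show the pass-by-value parameter style):

static ol_pdev_handle example_cfg_attach(adf_os_device_t osdev)
{
	struct txrx_pdev_cfg_param_t cfg_param;

	adf_os_mem_set(&cfg_param, 0, sizeof(cfg_param));
	cfg_param.is_full_reorder_offload = 1;	/* assumed configuration */

	/* Returns NULL if the cfg context allocation fails */
	return ol_pdev_cfg_attach(osdev, cfg_param);
}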
Example #9
//spinlock need free when unload
static void *HIFInit(adf_os_handle_t os_hdl)
{
    HIF_DEVICE_USB *hif_dev;

    /* allocate memory for HIF_DEVICE */
    hif_dev = (HIF_DEVICE_USB *) adf_os_mem_alloc(os_hdl, sizeof(HIF_DEVICE_USB));
    if (hif_dev == NULL) {
        return NULL;
    }

    adf_os_mem_zero(hif_dev, sizeof(HIF_DEVICE_USB));

    hif_dev->os_hdl = os_hdl;
    
    return hif_dev;
}
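
The comment above notes that resources must be freed at unload time; a hypothetical teardown counterpart (the name and scope are assumptions) would release the context allocated here:

static void HIFDeInit(void *hif_handle)
{
    HIF_DEVICE_USB *hif_dev = (HIF_DEVICE_USB *)hif_handle;

    if (hif_dev != NULL) {
        adf_os_mem_free(hif_dev);
    }
}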
Example #10
A_STATUS
ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev)
{
    int num_elems;

    num_elems = 1 << TXRX_RX_PN_TRACE_SIZE_LOG2;
    pdev->rx_pn_trace.idx = 0;
    pdev->rx_pn_trace.cnt = 0;
    pdev->rx_pn_trace.mask = num_elems - 1;
    pdev->rx_pn_trace.data = adf_os_mem_alloc(
        pdev->osdev, sizeof(*pdev->rx_pn_trace.data) * num_elems);
    if (! pdev->rx_pn_trace.data) {
        return A_ERROR;
    }
    return A_OK;
}
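
The matching detach is not shown; a minimal sketch, assuming it only has to release the trace buffer allocated above:

void
ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev)
{
    adf_os_mem_free(pdev->rx_pn_trace.data);
    pdev->rx_pn_trace.data = NULL;
}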
Example #11
struct htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
{
    struct htt_htc_pkt_union *pkt = NULL;

    HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
    if (pdev->htt_htc_pkt_freelist) {
        pkt = pdev->htt_htc_pkt_freelist;
        pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
    } 
    HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
   
    if (pkt == NULL) {
        pkt = adf_os_mem_alloc(pdev->osdev, sizeof(*pkt));
    }
    return &pkt->u.pkt; /* not actually a dereference */
}
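
The free path is referenced later in this listing (htt_htc_pkt_free) but its body is not shown; a sketch that mirrors the allocator above would push the union back onto the freelist under the same mutex rather than releasing the memory:

void
htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
{
    struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;

    HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
    u_pkt->u.next = pdev->htt_htc_pkt_freelist;
    pdev->htt_htc_pkt_freelist = u_pkt;
    HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
}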
Example #12
A_STATUS
fwd_device_inserted(HIF_HANDLE hif, adf_os_handle_t os_hdl)
{
    fwd_softc_t    *sc;
    HTC_CALLBACKS   fwd_cb;

    sc = adf_os_mem_alloc(os_hdl, sizeof(fwd_softc_t));
    if (!sc) {
      adf_os_print("FWD: No memory for fwd context\n");
      return -1;
    }

//    adf_os_print("fwd  : ctx allocation done = %p\n",sc);

    adf_os_mem_set(sc, 0, sizeof(fwd_softc_t));

    sc->hif_handle = hif;

    adf_os_timer_init(NULL, &sc->tmr, fwd_timer_expire, sc);
    HIFGetDefaultPipe(hif, &sc->rx_pipe, &sc->tx_pipe);

    sc->image                   = (a_uint8_t *)zcFwImage;
    sc->size                    = zcFwImageSize;
    sc->target_upload_addr      = fw_target_addr;

    fwd_cb.Context              = sc;
    fwd_cb.rxCompletionHandler  = fwd_recv;
    fwd_cb.txCompletionHandler  = fwd_txdone;

    sc->hif_handle              = hif;

adf_os_print("%s, hif: 0x%08x\n", __FUNCTION__, (a_uint32_t)hif);

    HIFPostInit(hif, NULL, &fwd_cb);

adf_os_print("%s, hif: 0x%08x\n", __FUNCTION__, (a_uint32_t)hif);

    hif_boot_start(hif);

    adf_os_print("Downloading\t");

    fwd_start_upload(sc);

    return A_STATUS_OK;
}
struct ath_ratectrl *
ath_rate_attach(struct ath_softc_tgt *sc)
{
	struct atheros_softc *asc;

	asc = adf_os_mem_alloc(sizeof(struct atheros_softc));
	if (asc == NULL)
		return NULL;

	adf_os_mem_set(asc, 0, sizeof(struct atheros_softc));
	asc->arc.arc_space = sizeof(struct atheros_node);

	ar5416AttachRateTables(asc);

	asc->tx_chainmask = 1;
    
	return &asc->arc;
}
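
A hypothetical detach counterpart; it assumes arc is the first member of struct atheros_softc, so the attach-time allocation can be recovered directly from the ath_ratectrl pointer:

void
ath_rate_detach(struct ath_ratectrl *arc)
{
	/* Assumes &asc->arc == asc, i.e. arc sits at offset 0 of atheros_softc */
	adf_os_mem_free(arc);
}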
Example #14
hif_status_t
fwd_device_inserted(HIF_HANDLE hif, adf_os_handle_t  os_hdl)
{
    fwd_softc_t    *sc;
    HTC_CALLBACKS   fwd_cb = {0};

    sc = adf_os_mem_alloc(os_hdl ,sizeof(fwd_softc_t));
    if (!sc) {
      adf_os_print("FWD: No memory for fwd context\n");
      return -1;
    }

//    adf_os_print("fwd  : ctx allocation done = %p\n",sc);

    adf_os_mem_set(sc, 0, sizeof(fwd_softc_t));

    sc->hif_handle = hif;

    /*adf_os_timer_init(NULL, &sc->tmr, fwd_timer_expire, sc);*/
    HIFGetDefaultPipe(hif, &sc->rx_pipe, &sc->tx_pipe);

    sc->image                   = (a_uint8_t *)zcFwImage;
    sc->size                    = zcFwImageSize;
    /* #ifdef MDIO_BOOT_LOAD    */
	sc->target_upload_addr      = fw_load_addr;
    /* #else */
    /* sc->target_upload_addr      = fw_target_addr; */
    /* #endif */
    fwd_cb.Context              = sc;
    fwd_cb.rxCompletionHandler  = fwd_recv;
    fwd_cb.txCompletionHandler  = fwd_txdone;

    sc->hif_handle              = hif;

    hif_boot_start(hif);
    
	HIFPostInit(hif, sc, &fwd_cb);

    adf_os_print("Downloading\t");

    fwd_start_upload(sc);

    return HIF_OK;
}
Example #15
A_STATUS
wdi_event_attach(struct ol_txrx_pdev_t *txrx_pdev)
{
    /* Input validation */
    if (!txrx_pdev) {
        adf_os_print(
            "Invalid device in %s\nWDI event attach failed", __FUNCTION__);
        return A_ERROR;
    }
    /* Separate subscriber list for each event */ 
    txrx_pdev->wdi_event_list = (wdi_event_subscribe **)
        adf_os_mem_alloc(
            txrx_pdev->osdev, sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS);
    if (!txrx_pdev->wdi_event_list) {
        adf_os_print("Insufficient memory for the WDI event lists\n");
        return A_NO_MEMORY;
    }
    return A_OK; 
}
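
A presumed detach counterpart (sketch only), releasing the per-event subscriber list allocated above:

A_STATUS
wdi_event_detach(struct ol_txrx_pdev_t *txrx_pdev)
{
    if (!txrx_pdev) {
        return A_ERROR;
    }
    if (txrx_pdev->wdi_event_list) {
        adf_os_mem_free(txrx_pdev->wdi_event_list);
        txrx_pdev->wdi_event_list = NULL;
    }
    return A_OK;
}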
LOCAL pool_handle_t _buf_pool_static_init(adf_os_handle_t handle)
{
#if 1
    int i;
    
    for(i=0; i < POOL_ID_MAX; i++) {
        g_poolCtx.bufQ[i] = NULL;
    }
    
    return &g_poolCtx;
#else    
    BUF_POOL_STATIC_CONTEXT *ctx;
    
    //ctx = (BUF_POOL_static_CONTEXT *)A_ALLOCRAM(sizeof(BUF_POOL_static_CONTEXT));
    ctx = (BUF_POOL_STATIC_CONTEXT *)adf_os_mem_alloc(sizeof(BUF_POOL_STATIC_CONTEXT));
    ctx->NetHandle = handle;
    
    return ctx; 
#endif    
}      
Example #17
A_STATUS
ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
{
    int num_elems;

    num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
    pdev->rx_reorder_trace.idx = 0;
    pdev->rx_reorder_trace.cnt = 0;
    pdev->rx_reorder_trace.mask = num_elems - 1;
    pdev->rx_reorder_trace.data = adf_os_mem_alloc(
        pdev->osdev, sizeof(*pdev->rx_reorder_trace.data) * num_elems);
    if (! pdev->rx_reorder_trace.data) {
        return A_ERROR;
    }
    while (--num_elems >= 0) {
        pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;
    }

    return A_OK;
}
Example #18
/**
 * @brief Allocate & initialize the S/W descriptor & H/W
 *        Descriptor
 * 
 * @param osdev
 * @param sc
 * @param num_desc
 */
static void
pci_dma_alloc_swdesc(adf_os_device_t osdev, pci_dma_softc_t *sc,
                     a_uint32_t  num_desc)
{
    a_uint32_t  size_sw, size_hw, i = 0;
    adf_os_dma_addr_t  paddr;
    zdma_swdesc_t  *swdesc;

    struct zsDmaDesc  *hwdesc;
//    struct zsDmaDesc  *tmpdesc;


    size_sw = sizeof(struct zdma_swdesc) * num_desc;
    size_hw = sizeof(struct zsDmaDesc) * num_desc;

    sc->sw_ring = adf_os_mem_alloc(osdev, size_sw);
    adf_os_assert(sc->sw_ring);

    sc->hw_ring = adf_os_dmamem_alloc(osdev, size_hw, PCI_DMA_MAPPING, 
                                      &sc->dmap);

//    printk("sc->hw_ring  %x paddr is %x \n",(unsigned int )sc->hw_ring, (unsigned int) sc->dmap->seg[0].daddr);
    adf_os_assert(sc->hw_ring);

    swdesc = sc->sw_ring;
    hwdesc = sc->hw_ring;
    paddr  = adf_os_dmamem_map2addr(sc->dmap);
    
//    printk("sc->hw_ring paddr  %x \n",paddr );
    for (i = 0; i < num_desc; i++) {
        swdesc[i].descp = &hwdesc[i];
        swdesc[i].hwaddr = paddr;
        paddr = (adf_os_dma_addr_t)((struct zsDmaDesc *)paddr + 1);
    }
    sc->num_desc = num_desc;

    pci_dma_init_ring(swdesc, num_desc);

}
static wmi_handle_t _WMI_Init(WMI_SVC_CONFIG *pWmiConfig)
{
	WMI_SVC_CONTEXT *pWMI = NULL;
	int eventSize = WMI_SVC_MAX_BUFFERED_EVENT_SIZE + sizeof(WMI_CMD_HDR) + HTC_HDR_SZ;

	pWMI = (WMI_SVC_CONTEXT *)adf_os_mem_alloc(sizeof(WMI_SVC_CONTEXT));
	if (pWMI == NULL) {
		return NULL;
	}

	pWMI->pDispatchHead = NULL;
	pWMI->PoolHandle = pWmiConfig->PoolHandle;
	pWMI->HtcHandle = pWmiConfig->HtcHandle;

	BUF_Pool_create_pool(pWmiConfig->PoolHandle, POOL_ID_WMI_SVC_CMD_REPLY,
			     pWmiConfig->MaxCmdReplyEvts, eventSize);

	BUF_Pool_create_pool(pWmiConfig->PoolHandle, POOL_ID_WMI_SVC_EVENT,
			     pWmiConfig->MaxEventEvts, eventSize);

	/* NOTE: since RAM allocation is zero-initialized, there is nothing to do for the
	 * direct event pool */

        /* register the WMI control service */
	pWMI->WMIControlService.ProcessRecvMsg = A_INDIR(wmi_svc_api._WMI_RecvMessageHandler);
	pWMI->WMIControlService.ProcessSendBufferComplete = A_INDIR(wmi_svc_api._WMI_SendCompleteHandler);
	pWMI->WMIControlService.ProcessConnect = A_INDIR(wmi_svc_api._WMI_ServiceConnect);
	pWMI->WMIControlService.MaxSvcMsgSize = WMI_SVC_MSG_SIZE + sizeof(WMI_CMD_HDR);
        /* all buffers that are sent through the control endpoint are at least WMI_SVC_MAX_BUFFERED_EVENT_SIZE
         * in size.  Any WMI event that supplies a data buffer must insure that the space in the buffer
         * is at least this size. */
	pWMI->WMIControlService.TrailerSpcCheckLimit = WMI_SVC_MAX_BUFFERED_EVENT_SIZE;
	pWMI->WMIControlService.ServiceID = WMI_CONTROL_SVC;
	pWMI->WMIControlService.ServiceCtx = pWMI;
	HTC_RegisterService(pWmiConfig->HtcHandle, &pWMI->WMIControlService);

	return pWMI;
}
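
A shutdown counterpart is not part of this listing; a minimal sketch, assuming only the service context itself needs to be released, would be:

static void _WMI_Shutdown(wmi_handle_t handle)
{
	WMI_SVC_CONTEXT *pWMI = (WMI_SVC_CONTEXT *)handle;

	if (pWMI != NULL) {
		adf_os_mem_free(pWMI);
	}
}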
static adf_drv_handle_t
ath_pci_probe(adf_os_resource_t *res,a_int32_t count, adf_os_attach_data_t *data,
	      adf_os_device_t osdev)
{
	struct ath_pci_softc *sc;
	a_uint8_t csz = 32;
	adf_os_pci_dev_id_t *id = (adf_os_pci_dev_id_t *)data;

	adf_os_pci_config_write8(osdev, ATH_PCI_CACHE_LINE_SIZE, csz);
	adf_os_pci_config_write8(osdev, ATH_PCI_LATENCY_TIMER, 0xa8);

	sc = adf_os_mem_alloc(sizeof(struct ath_pci_softc));

	if (sc == NULL) {
		adf_os_print("ath_pci: no memory for device state\n");
		goto bad2;
	}
	adf_os_mem_set(sc, 0, sizeof(struct ath_pci_softc));

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	sc->aps_sc.sc_invalid = 1;

	adf_os_print("ath_pci_probe %x\n",id->device);

	if (ath_tgt_attach(id->device, res->start, &sc->aps_sc, osdev) != 0)
		goto bad3;

	/* ready to process interrupts */
	sc->aps_sc.sc_invalid = 0;
	adf_os_setup_intr(osdev, ath_intr);
	return (adf_drv_handle_t)sc;
bad3:
	adf_os_mem_free(sc);
bad2:
	return NULL;
}
Example #21
int
htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
{
    int i, i_int, pool_size;
    uint32_t **p;
    adf_os_dma_addr_t pool_paddr = {0};
    struct htt_tx_desc_page_t *page_info;
    unsigned int num_link = 0;
    uint32_t page_size;

    if (pdev->cfg.is_high_latency) {
        pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
    } else {
        pdev->tx_descs.size =
            /*
             * Start with the size of the base struct
             * that actually gets downloaded.
             */
            sizeof(struct htt_host_tx_desc_t)
            /*
             * Add the fragmentation descriptor elements.
             * Add the most that the OS may deliver, plus one more in
             * case the txrx code adds a prefix fragment (for TSO or
             * audio interworking SNAP header)
             */
            + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8 // 2x u_int32_t
            + 4; /* u_int32_t fragmentation list terminator */
    }

    /*
     * Make sure tx_descs.size is a multiple of 4-bytes.
     * It should be, but round up just to be sure.
     */
    pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
    pdev->tx_descs.pool_elems = desc_pool_elems;
    pdev->tx_descs.alloc_cnt = 0;

    pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;

   /* Calculate required page count first */
    page_size = adf_os_mem_get_page_size();
    pdev->num_pages = pool_size / page_size;
    if (pool_size % page_size)
        pdev->num_pages++;

    /* Pack as many descriptors as possible into a single page */
    pdev->num_desc_per_page = page_size / pdev->tx_descs.size;

    /* Pages information storage */
    pdev->desc_pages = (struct htt_tx_desc_page_t *)adf_os_mem_alloc(
        pdev->osdev, pdev->num_pages * sizeof(struct htt_tx_desc_page_t));
    if (!pdev->desc_pages) {
        adf_os_print("HTT Attach, desc page alloc fail");
        goto fail1;
    }

    page_info = pdev->desc_pages;
    p = (uint32_t **) pdev->tx_descs.freelist;
    /* Allocate required memory with multiple pages */
    for(i = 0; i < pdev->num_pages; i++) {
        if (pdev->cfg.is_high_latency) {
            page_info->page_v_addr_start = adf_os_mem_alloc(
                pdev->osdev, page_size);
            page_info->page_p_addr = pool_paddr;
            if (!page_info->page_v_addr_start) {
               page_info = pdev->desc_pages;
               for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free(page_info->page_v_addr_start);
               }
               goto fail2;
            }
        } else {
            page_info->page_v_addr_start = adf_os_mem_alloc_consistent(
                pdev->osdev,
                page_size,
                &page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
            if (!page_info->page_v_addr_start) {
               page_info = pdev->desc_pages;
               for (i_int = 0 ; i_int < i; i_int++) {
                    page_info = pdev->desc_pages + i_int;
                    adf_os_mem_free_consistent(
                        pdev->osdev,
                        pdev->num_desc_per_page * pdev->tx_descs.size,
                        page_info->page_v_addr_start,
                        page_info->page_p_addr,
                        adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
               }
               goto fail2;
            }
        }
        page_info->page_v_addr_end = page_info->page_v_addr_start +
            pdev->num_desc_per_page * pdev->tx_descs.size;
        page_info++;
    }

    page_info = pdev->desc_pages;
    pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
    p = (uint32_t **) pdev->tx_descs.freelist;
    for(i = 0; i < pdev->num_pages; i++) {
        for (i_int = 0; i_int < pdev->num_desc_per_page; i_int++) {
            if (i_int == (pdev->num_desc_per_page - 1)) {
                /* Last element on this page; it should point to the next page */
                if (!page_info->page_v_addr_start) {
                    adf_os_print("overflow num link %d\n", num_link);
                    goto fail3;
                }
                page_info++;
                *p = (uint32_t *)page_info->page_v_addr_start;
            }
            else {
                *p = (uint32_t *)(((char *) p) + pdev->tx_descs.size);
            }
            num_link++;
            p = (uint32_t **) *p;
            /* Last link established exit */
            if (num_link == (pdev->tx_descs.pool_elems - 1))
               break;
        }
    }
    *p = NULL;

    if (pdev->cfg.is_high_latency) {
        adf_os_atomic_init(&pdev->htt_tx_credit.target_delta);
        adf_os_atomic_init(&pdev->htt_tx_credit.bus_delta);
        adf_os_atomic_add(HTT_MAX_BUS_CREDIT,&pdev->htt_tx_credit.bus_delta);
    }
    return 0; /* success */

fail3:
    if (pdev->cfg.is_high_latency) {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free(page_info->page_v_addr_start);
        }
    } else {
        page_info = pdev->desc_pages;
        for (i_int = 0 ; i_int < pdev->num_pages; i_int++) {
            page_info = pdev->desc_pages + i_int;
            adf_os_mem_free_consistent(
                pdev->osdev,
                pdev->num_desc_per_page * pdev->tx_descs.size,
                page_info->page_v_addr_start,
                page_info->page_p_addr,
                adf_os_get_dma_mem_context((&pdev->tx_descs), memctx));
        }
    }

fail2:
    adf_os_mem_free(pdev->desc_pages);

fail1:
    return -1;
}
Example #22
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr)
      {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break; /* use whatever buffers were allocated so far */
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
Example #23
htt_pdev_handle
htt_attach(
    ol_txrx_pdev_handle txrx_pdev,
    ol_pdev_handle ctrl_pdev,
    HTC_HANDLE htc_pdev,
    adf_os_device_t osdev,
    int desc_pool_size)
{
    struct htt_pdev_t *pdev;
    int i;

    pdev = adf_os_mem_alloc(osdev, sizeof(*pdev));

    if (!pdev) {
        goto fail1;
    }

    pdev->osdev = osdev;
    pdev->ctrl_pdev = ctrl_pdev;
    pdev->txrx_pdev = txrx_pdev;
    pdev->htc_pdev = htc_pdev;

    adf_os_mem_set(&pdev->stats, 0, sizeof(pdev->stats));
    pdev->htt_htc_pkt_freelist = NULL;

    /* for efficiency, store a local copy of the is_high_latency flag */
    pdev->cfg.is_high_latency = ol_cfg_is_high_latency(pdev->ctrl_pdev);

    /*
     * Connect to HTC service.
     * This has to be done before calling htt_rx_attach,
     * since htt_rx_attach involves sending a rx ring configure
     * message to the target.
     */
//AR6004 doesn't need the HTT layer.
#ifndef AR6004_HW
    if (htt_htc_attach(pdev)) {
        goto fail2;
    }
#endif
    if (htt_tx_attach(pdev, desc_pool_size)) {
        goto fail2;
    }

    if (htt_rx_attach(pdev)) {
        goto fail3;
    }

    HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex); 
    HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);

    /* pre-allocate some HTC_PACKET objects */
    for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
        struct htt_htc_pkt_union *pkt;
        pkt = adf_os_mem_alloc(pdev->osdev, sizeof(*pkt));
        if (! pkt) {
            break;
        }
        htt_htc_pkt_free(pdev, &pkt->u.pkt);
    }

    if (pdev->cfg.is_high_latency) {
        /*
         * HL - download the whole frame.
         * Specify a download length greater than the max MSDU size,
         * so the downloads will be limited by the actual frame sizes.
         */
        pdev->download_len = 5000;
        if (ol_cfg_tx_free_at_download(pdev->ctrl_pdev)) {
            pdev->tx_send_complete_part2 = ol_tx_download_done_hl_free;
        } else {
            pdev->tx_send_complete_part2 = ol_tx_download_done_hl_retain;
        }

        /*
         * For HL, the FW rx desc is directly referenced at its location
         * inside the rx indication message.
         */
/*
 * CHECK THIS LATER: does the HL HTT version of htt_rx_mpdu_desc_list_next
 * (which is not currently implemented) present the adf_nbuf_data(rx_ind_msg)
 * as the abstract rx descriptor?
 * If not, the rx_fw_desc_offset initialization here will have to be
 * adjusted accordingly.
 * NOTE: for HL, because fw rx desc is in ind msg, not in rx desc, so the
 * offset should be negtive value
 */
        pdev->rx_fw_desc_offset =
            HTT_ENDIAN_BYTE_IDX_SWAP(
                    HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET
                    - HTT_RX_IND_HL_BYTES);

        htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_hl;
    } else {
        /*
         * LL - download just the initial portion of the frame.
         * Download enough to cover the encapsulation headers checked
         * by the target's tx classification descriptor engine.
         */
        /* Get the packet download length */
        pdev->download_len = htt_pkt_dl_len_get(pdev);

        /*
         * Account for the HTT tx descriptor, including the
         * HTC header + alignment padding.
         */
        pdev->download_len += sizeof(struct htt_host_tx_desc_t);

        pdev->tx_send_complete_part2 = ol_tx_download_done_ll;

        /*
         * For LL, the FW rx desc is alongside the HW rx desc fields in
         * the htt_host_rx_desc_base struct.
         */
        pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;

        htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
    }

    return pdev;

fail3:
    htt_tx_detach(pdev);

fail2:
    adf_os_mem_free(pdev);

fail1:
    return NULL;
}
/* Target to host Msg/event  handler  for low priority messages*/
void
htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
{
    struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
    u_int32_t *msg_word;
    enum htt_t2h_msg_type msg_type;

    msg_word = (u_int32_t *) adf_nbuf_data(htt_t2h_msg);
    msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
    switch (msg_type) {
    case HTT_T2H_MSG_TYPE_VERSION_CONF:
        {
            htc_pm_runtime_put(pdev->htc_pdev);
            pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
            pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
            adf_os_print("target uses HTT version %d.%d; host uses %d.%d\n",
                pdev->tgt_ver.major, pdev->tgt_ver.minor,
                HTT_CURRENT_VERSION_MAJOR, HTT_CURRENT_VERSION_MINOR);
            if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
                adf_os_print("*** Incompatible host/target HTT versions!\n");
            }
            /* abort if the target is incompatible with the host */
            adf_os_assert(pdev->tgt_ver.major == HTT_CURRENT_VERSION_MAJOR);
            if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
                adf_os_print(
                    "*** Warning: host/target HTT versions are different, "
                    "though compatible!\n");
            }
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_FLUSH:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            int seq_num_start, seq_num_end;
            enum htt_rx_flush_action action;

            peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FLUSH_TID_GET(*msg_word);
            seq_num_start = HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word+1));
            seq_num_end = HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word+1));
            action =
                HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word+1)) == 1 ?
                htt_rx_flush_release : htt_rx_flush_discard;
            ol_rx_flush_handler(
                pdev->txrx_pdev,
                peer_id, tid,
                seq_num_start,
                seq_num_end,
                action);
            break;
        }
    case  HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
        {
            int msdu_cnt;
            msdu_cnt = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
            ol_rx_offload_deliver_ind_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                msdu_cnt);
            break;
        }
    case  HTT_T2H_MSG_TYPE_RX_FRAG_IND:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
            tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
            HTT_RX_FRAG_SET_LAST_MSDU(pdev, htt_t2h_msg);

            ol_rx_frag_indication_handler(
                pdev->txrx_pdev,
                htt_t2h_msg,
                peer_id,
                tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_ADDBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;
            u_int8_t win_sz;
            u_int16_t start_seq_num;

            /*
             * FOR NOW, the host doesn't need to know the initial
             * sequence number for rx aggregation.
             * Thus, any value will do - specify 0.
             */
            start_seq_num = 0;
            peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_ADDBA_TID_GET(*msg_word);
            win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
            ol_rx_addba_handler(
                pdev->txrx_pdev, peer_id, tid, win_sz, start_seq_num,
                0 /* success */);
            break;
        }
    case HTT_T2H_MSG_TYPE_RX_DELBA:
        {
            u_int16_t peer_id;
            u_int8_t tid;

            peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
            tid = HTT_RX_DELBA_TID_GET(*msg_word);
            ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_MAP:
        {
            u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
            u_int8_t *peer_mac_addr;
            u_int16_t peer_id;
            u_int8_t vdev_id;

            peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
            vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
            peer_mac_addr = htt_t2h_mac_addr_deswizzle(
                (u_int8_t *) (msg_word+1), &mac_addr_deswizzle_buf[0]);

            ol_rx_peer_map_handler(
                pdev->txrx_pdev, peer_id, vdev_id, peer_mac_addr, 1/*can tx*/);
            break;
        }
    case HTT_T2H_MSG_TYPE_PEER_UNMAP:
        {
            u_int16_t peer_id;
            peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);

            ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
            break;
        }
    case HTT_T2H_MSG_TYPE_SEC_IND:
        {
            u_int16_t peer_id;
            enum htt_sec_type sec_type;
            int is_unicast;

            peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
            sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
            is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
            msg_word++; /* point to the first part of the Michael key */
            ol_rx_sec_ind_handler(
                pdev->txrx_pdev, peer_id, sec_type, is_unicast, msg_word, msg_word+2);
            break;
        }
    case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
        {
            struct htt_mgmt_tx_compl_ind *compl_msg;

            compl_msg = (struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
            if (pdev->cfg.is_high_latency) {
                ol_tx_target_credit_update(pdev->txrx_pdev, 1);
            }
            ol_tx_single_completion_handler(
                pdev->txrx_pdev, compl_msg->status, compl_msg->desc_id);
            htc_pm_runtime_put(pdev->htc_pdev);
            HTT_TX_SCHED(pdev);
            break;
        }
#if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF
    case HTT_T2H_MSG_TYPE_STATS_CONF:
        {
            u_int64_t cookie;
            u_int8_t *stats_info_list;

            cookie = *(msg_word + 1);
            cookie |= ((u_int64_t) (*(msg_word + 2))) << 32;

            stats_info_list = (u_int8_t *) (msg_word + 3);
            htc_pm_runtime_put(pdev->htc_pdev);
            ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie, stats_info_list);
            break;
        }
#endif
#ifndef REMOVE_PKT_LOG
    case HTT_T2H_MSG_TYPE_PKTLOG:
        {
            u_int32_t *pl_hdr;
            u_int32_t log_type;
            pl_hdr = (msg_word + 1);
            log_type = (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
                                            ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
            if (log_type == PKTLOG_TYPE_TX_CTRL ||
               (log_type) == PKTLOG_TYPE_TX_STAT ||
               (log_type) == PKTLOG_TYPE_TX_MSDU_ID ||
               (log_type) == PKTLOG_TYPE_TX_FRM_HDR ||
               (log_type) == PKTLOG_TYPE_TX_VIRT_ADDR) {
                wdi_event_handler(WDI_EVENT_TX_STATUS, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_FIND) {
                wdi_event_handler(WDI_EVENT_RATE_FIND, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RC_UPDATE) {
                wdi_event_handler(
                    WDI_EVENT_RATE_UPDATE, pdev->txrx_pdev, pl_hdr);
            } else if ((log_type) == PKTLOG_TYPE_RX_STAT) {
                wdi_event_handler(WDI_EVENT_RX_DESC, pdev->txrx_pdev, pl_hdr);
            }
            break;
        }
#endif
    case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
    {
        u_int32_t htt_credit_delta_abs;
        int32_t htt_credit_delta;
        int sign;

        htt_credit_delta_abs = HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
        sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
        htt_credit_delta = sign * htt_credit_delta_abs;
        ol_tx_credit_completion_handler(pdev->txrx_pdev, htt_credit_delta);
        break;
    }

#ifdef IPA_UC_OFFLOAD
    case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
        {
            u_int8_t op_code;
            u_int16_t len;
            u_int8_t *op_msg_buffer;
            u_int8_t *msg_start_ptr;

            htc_pm_runtime_put(pdev->htc_pdev);
            msg_start_ptr = (u_int8_t *)msg_word;
            op_code = HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
            msg_word++;
            len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);

            op_msg_buffer = adf_os_mem_alloc(NULL,
                sizeof(struct htt_wdi_ipa_op_response_t) + len);
            if (!op_msg_buffer) {
                adf_os_print("OPCODE messsage buffer alloc fail");
                break;
            }
            adf_os_mem_copy(op_msg_buffer,
                    msg_start_ptr,
                    sizeof(struct htt_wdi_ipa_op_response_t) + len);
            ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_msg_buffer);
            break;
        }
#endif /* IPA_UC_OFFLOAD */

    default:
        break;
    };
    /* Free the indication buffer */
    adf_nbuf_free(htt_t2h_msg);
}
/* WMI command API */
int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
			 WMI_CMD_ID cmd_id)
{
	HTC_PACKET *pkt;
	A_STATUS status;
	void *vos_context;
	struct ol_softc *scn;
	A_UINT16 htc_tag = 0;

	if (wmi_get_runtime_pm_inprogress(wmi_handle))
		goto skip_suspend_check;

	if (adf_os_atomic_read(&wmi_handle->is_target_suspended) &&
			( (WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID != cmd_id) &&
			  (WMI_PDEV_RESUME_CMDID != cmd_id)) ) {
		pr_err("%s: Target is suspended  could not send WMI command: %d\n",
				__func__, cmd_id);
		VOS_ASSERT(0);
		return -EBUSY;
	} else
		goto dont_tag;

skip_suspend_check:
	switch(cmd_id) {
	case WMI_WOW_ENABLE_CMDID:
	case WMI_PDEV_SUSPEND_CMDID:
	case WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID:
	case WMI_WOW_ADD_WAKE_PATTERN_CMDID:
	case WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID:
	case WMI_PDEV_RESUME_CMDID:
	case WMI_WOW_DEL_WAKE_PATTERN_CMDID:
#ifdef FEATURE_WLAN_D0WOW
	case WMI_D0_WOW_ENABLE_DISABLE_CMDID:
#endif
		htc_tag = HTC_TX_PACKET_TAG_AUTO_PM;
	default:
		break;
	}

dont_tag:
	/* Do sanity check on the TLV parameter structure */
	{
		void *buf_ptr = (void *) adf_nbuf_data(buf);

		if (wmitlv_check_command_tlv_params(NULL, buf_ptr, len, cmd_id) != 0)
		{
			adf_os_print("\nERROR: %s: Invalid WMI Parameter Buffer for Cmd:%d\n",
				     __func__, cmd_id);
			return -1;
		}
	}

	if (adf_nbuf_push_head(buf, sizeof(WMI_CMD_HDR)) == NULL) {
		pr_err("%s, Failed to send cmd %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	WMI_SET_FIELD(adf_nbuf_data(buf), WMI_CMD_HDR, COMMANDID, cmd_id);

	adf_os_atomic_inc(&wmi_handle->pending_cmds);
	if (adf_os_atomic_read(&wmi_handle->pending_cmds) >= WMI_MAX_CMDS) {
		vos_context = vos_get_global_context(VOS_MODULE_ID_WDA, NULL);
		scn = vos_get_context(VOS_MODULE_ID_HIF, vos_context);
		pr_err("\n%s: hostcredits = %d\n", __func__,
		       wmi_get_host_credits(wmi_handle));
		HTC_dump_counter_info(wmi_handle->htc_handle);
		//dump_CE_register(scn);
		//dump_CE_debug_register(scn->hif_sc);
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s: MAX 1024 WMI Pending cmds reached.\n", __func__);
		vos_set_logp_in_progress(VOS_MODULE_ID_VOSS, TRUE);
		schedule_work(&recovery_work);
		return -EBUSY;
	}

	pkt = adf_os_mem_alloc(NULL, sizeof(*pkt));
	if (!pkt) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s, Failed to alloc htc packet %x, no memory\n",
		       __func__, cmd_id);
		return -ENOMEM;
	}

	SET_HTC_PACKET_INFO_TX(pkt,
			NULL,
			adf_nbuf_data(buf),
			len + sizeof(WMI_CMD_HDR),
			/* htt_host_data_dl_len(buf)+20 */
			wmi_handle->wmi_endpoint_id,
			htc_tag);

	SET_HTC_PACKET_NET_BUF_CONTEXT(pkt, buf);

	WMA_LOGD("Send WMI command:%s command_id:%d",
			get_wmi_cmd_string(cmd_id), cmd_id);

#ifdef WMI_INTERFACE_EVENT_LOGGING
	adf_os_spin_lock_bh(&wmi_handle->wmi_record_lock);
        /*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers*/
        WMI_COMMAND_RECORD(cmd_id ,((u_int32_t *)adf_nbuf_data(buf) + 2));
	adf_os_spin_unlock_bh(&wmi_handle->wmi_record_lock);
#endif

	status = HTCSendPkt(wmi_handle->htc_handle, pkt);

	if (A_OK != status) {
		adf_os_atomic_dec(&wmi_handle->pending_cmds);
		pr_err("%s %d, HTCSendPkt failed\n", __func__, __LINE__);
	}


	return ((status == A_OK) ? EOK : -1);
}
Example #26
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
    unsigned int uc_tx_buf_sz,
    unsigned int uc_tx_buf_cnt,
    unsigned int uc_tx_partition_base)
{
   unsigned int  tx_buffer_count;
   unsigned int  tx_buffer_count_pwr2;
   adf_nbuf_t    buffer_vaddr;
   u_int32_t     buffer_paddr;
   u_int32_t    *header_ptr;
   u_int32_t    *ring_vaddr;
   int           return_code = 0;
   uint16_t     idx;

   /* Allocate CE Write Index WORD */
   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                4,
                &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
      adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
      return -1;
   }

   /* Allocate TX COMP Ring */
   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
       adf_os_mem_alloc_consistent(pdev->osdev,
                uc_tx_buf_cnt * 4,
                &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                adf_os_get_dma_mem_context(
                   (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
   if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
      adf_os_print("%s: TX COMP ring alloc fail", __func__);
      return_code = -2;
      goto free_tx_ce_idx;
   }

   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);

   /* Allocate TX BUF vAddress Storage */
   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
         (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
                          uc_tx_buf_cnt * sizeof(adf_nbuf_t));
   if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
      adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
                   __func__);
      return_code = -3;
      goto free_tx_comp_base;
   }
   adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
                   uc_tx_buf_cnt * sizeof(adf_nbuf_t));

   ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
   /* Allocate TX buffers as many as possible */
   for (tx_buffer_count = 0;
        tx_buffer_count < (uc_tx_buf_cnt - 1);
        tx_buffer_count++) {
      buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
                uc_tx_buf_sz, 0, 4, FALSE);
      if (!buffer_vaddr)
      {
         adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
                      __func__, tx_buffer_count);
         break;
      }

      /* Init buffer */
      adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
      header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);

      *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
      header_ptr++;
      *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;

      adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
      buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
      header_ptr++;
      *header_ptr = (u_int32_t)(buffer_paddr + 16);

      header_ptr++;
      *header_ptr = 0xFFFFFFFF;

      /* FRAG Header */
      header_ptr++;
      *header_ptr = buffer_paddr + 32;

      *ring_vaddr = buffer_paddr;
      pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
            buffer_vaddr;
      /* Memory barrier to ensure actual value updated */

      ring_vaddr++;
   }

   /*
    * Tx complete ring buffer count should be power of 2.
    * So, allocated Tx buffer count should be one less than ring buffer size.
    */
   tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1;
   if (tx_buffer_count > tx_buffer_count_pwr2) {
       adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d",
                   __func__, tx_buffer_count, tx_buffer_count_pwr2);

       /* Free over allocated buffers below power of 2 */
       for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
           if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
               adf_nbuf_unmap(pdev->osdev,
                   pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
                   ADF_OS_DMA_FROM_DEVICE);
               adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
           }
       }
   }

   if (tx_buffer_count_pwr2 < 0) {
       adf_os_print("%s: Failed to round down Tx buffer count %d",
                   __func__, tx_buffer_count_pwr2);
       goto free_tx_comp_base;
   }

   pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2;

   return 0;

free_tx_comp_base:
   adf_os_mem_free_consistent(pdev->osdev,
                   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
free_tx_ce_idx:
   adf_os_mem_free_consistent(pdev->osdev,
                   4,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
                   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
                   adf_os_get_dma_mem_context(
                      (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
   return return_code;
}
/**
 * adf_os_mem_multi_pages_alloc() - allocate large size of kernel memory
 * @osdev:          OS device handle pointer
 * @pages:          Multi page information storage
 * @element_size:   Each element size
 * @element_num:    Total number of elements to be allocated
 * @memctxt:        Memory context
 * @cacheable:      Coherent memory or cacheable memory
 *
 * This function allocates a large amount of memory across multiple pages.
 * Large contiguous allocations fail frequently, so instead of allocating one
 * big block in a single shot, allocate multiple non-contiguous pages and
 * combine them at the point of use.
 *
 * Return: None
 */
void adf_os_mem_multi_pages_alloc(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			size_t element_size,
			uint16_t element_num,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	uint16_t page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;
	void **cacheable_pages = NULL;
	uint16_t i;

	pages->num_element_per_page = PAGE_SIZE / element_size;
	if (!pages->num_element_per_page) {
		adf_os_print("Invalid page %d or element size %d",
			(int)PAGE_SIZE, (int)element_size);
		goto out_fail;
	}

	pages->num_pages = element_num / pages->num_element_per_page;
	if (element_num % pages->num_element_per_page)
		pages->num_pages++;

	if (cacheable) {
		/* Pages information storage */
		pages->cacheable_pages = adf_os_mem_alloc(osdev,
			pages->num_pages * sizeof(pages->cacheable_pages));
		if (!pages->cacheable_pages) {
			adf_os_print("Cacheable page storage alloc fail");
			goto out_fail;
		}

		cacheable_pages = pages->cacheable_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			cacheable_pages[page_idx] = adf_os_mem_alloc(
				osdev, PAGE_SIZE);
			if (!cacheable_pages[page_idx]) {
				adf_os_print("cacheable page alloc fail, pi %d",
					page_idx);
				goto page_alloc_fail;
			}
		}
		pages->dma_pages = NULL;
	} else {
		pages->dma_pages = adf_os_mem_alloc(osdev,
		       pages->num_pages * sizeof(struct adf_os_mem_dma_page_t));
		if (!pages->dma_pages) {
			adf_os_print("dmaable page storage alloc fail");
			goto out_fail;
		}

		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			dma_pages->page_v_addr_start =
				adf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
				&dma_pages->page_p_addr, memctxt);
			if (!dma_pages->page_v_addr_start) {
				adf_os_print("dmaable page alloc fail pi %d",
					page_idx);
				goto page_alloc_fail;
			}
			dma_pages->page_v_addr_end =
				dma_pages->page_v_addr_start + PAGE_SIZE;
			dma_pages++;
		}
		pages->cacheable_pages = NULL;
	}
	return;

page_alloc_fail:
	if (cacheable) {
		for (i = 0; i < page_idx; i++)
			adf_os_mem_free(pages->cacheable_pages[i]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (i = 0; i < page_idx; i++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

out_fail:
	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
	return;
}
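
A presumed free counterpart (a sketch mirroring the alloc and its failure path above, not a verbatim copy of the driver's implementation):

void adf_os_mem_multi_pages_free(adf_os_device_t osdev,
			struct adf_os_mem_multi_page_t *pages,
			adf_os_dma_context_t memctxt,
			bool cacheable)
{
	uint16_t page_idx;
	struct adf_os_mem_dma_page_t *dma_pages;

	if (cacheable) {
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
			adf_os_mem_free(pages->cacheable_pages[page_idx]);
		adf_os_mem_free(pages->cacheable_pages);
	} else {
		dma_pages = pages->dma_pages;
		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
			adf_os_mem_free_consistent(osdev, PAGE_SIZE,
				dma_pages->page_v_addr_start,
				dma_pages->page_p_addr, memctxt);
			dma_pages++;
		}
		adf_os_mem_free(pages->dma_pages);
	}

	pages->cacheable_pages = NULL;
	pages->dma_pages = NULL;
	pages->num_pages = 0;
}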
Example #28
LOCAL htc_handle_t _HTC_Init(HTC_SETUP_COMPLETE_CB SetupComplete,
                             HTC_CONFIG *pConfig)
{
	HIF_CALLBACK hifCBConfig;
	HTC_CONTEXT *pHTC;    
    
	pHTC = (HTC_CONTEXT *)adf_os_mem_alloc(sizeof(HTC_CONTEXT));
	if (pHTC == NULL) {
		return NULL;
	}

	adf_os_mem_zero(pHTC, sizeof(HTC_CONTEXT));

	pHTC->OSHandle = pConfig->OSHandle;
	pHTC->PoolHandle = pConfig->PoolHandle;
	pHTC->hifHandle = pConfig->HIFHandle;
                        
	hifCBConfig.send_buf_done = A_INDIR(htc._HTC_SendDoneHandler);
	hifCBConfig.recv_buf = A_INDIR(htc._HTC_MsgRecvHandler);
	hifCBConfig.context = pHTC;
    
	/* initialize hardware layer */
	HIF_register_callback(pConfig->HIFHandle, &hifCBConfig);
                             
        /* see if the host wants us to override the number of ctrl buffers */
	pHTC->NumBuffersForCreditRpts = 0;
    
	if (0 == pHTC->NumBuffersForCreditRpts) {
		/* nothing to override, simply set default */
		pHTC->NumBuffersForCreditRpts = HTC_DEFAULT_NUM_CTRL_BUFFERS; 
	}    
    
	pHTC->MaxEpPendingCreditRpts = 0;
    
	if (0 == pHTC->MaxEpPendingCreditRpts) {
		pHTC->MaxEpPendingCreditRpts = HTC_DEFAULT_MAX_EP_PENDING_CREDIT_REPORTS;    
	}
	/* calculate the total allocation size based on the number of credit report buffers */
	pHTC->CtrlBufferAllocSize = MIN_CREDIT_BUFFER_ALLOC_SIZE * pHTC->NumBuffersForCreditRpts;
	/* we need at least enough buffer space for 1 ctrl message */
	pHTC->CtrlBufferAllocSize = A_MAX(pHTC->CtrlBufferAllocSize,MAX_HTC_SETUP_MSG_SIZE);
    
	/* save the size of each buffer/credit we will receive */
	pHTC->RecvBufferSize = pConfig->CreditSize; //RecvBufferSize;
	pHTC->TotalCredits = pConfig->CreditNumber;
	pHTC->TotalCreditsAssigned = 0;
     
	/* setup the pseudo service that handles HTC control messages */
	pHTC->HTCControlService.ProcessRecvMsg = A_INDIR(htc._HTC_ControlSvcProcessMsg);
	pHTC->HTCControlService.ProcessSendBufferComplete = A_INDIR(htc._HTC_ControlSvcProcessSendComplete);
	pHTC->HTCControlService.TrailerSpcCheckLimit = HTC_CTRL_BUFFER_CHECK_SIZE;
	pHTC->HTCControlService.MaxSvcMsgSize = MAX_HTC_SETUP_MSG_SIZE;
	pHTC->HTCControlService.ServiceCtx = pHTC;
    
	/* automatically register this pseudo service to endpoint 1 */
	pHTC->Endpoints[ENDPOINT0].pService = &pHTC->HTCControlService;
	HIF_get_default_pipe(pHTC->hifHandle, &pHTC->Endpoints[ENDPOINT0].UpLinkPipeID, 
			     &pHTC->Endpoints[ENDPOINT0].DownLinkPipeID);
    
	/* Initialize control pipe so we could receive the HTC control packets */
	// @TODO: msg size!
	HIF_config_pipe(pHTC->hifHandle, pHTC->Endpoints[ENDPOINT0].UpLinkPipeID, 1);    
    
	/* set the first free endpoint */
	pHTC->CurrentEpIndex = ENDPOINT1;
	pHTC->SetupCompleteCb = SetupComplete;
    
        /* setup buffers for just the setup phase, we only need 1 buffer to handle
	 * setup */
	HTC_AssembleBuffers(pHTC, 4, MAX_HTC_SETUP_MSG_SIZE);
   
	/* start hardware layer so that we can queue buffers */
	HIF_start(pHTC->hifHandle);
    
	return pHTC;
}
Example #29
A_STATUS HIFDevSendBuffer(HIF_SDIO_DEVICE *pDev, unsigned int transferID, a_uint8_t pipe,
        unsigned int nbytes, adf_nbuf_t buf)
{
    A_STATUS status;
    A_UINT32 paddedLength;
    int frag_count = 0, i, head_data_len;
    struct HIFSendContext *pSendContext;
    unsigned char *pData;
    A_UINT32 request = HIF_WR_ASYNC_BLOCK_INC;
    A_UINT8 mboxIndex = HIFDevMapPipeToMailBox(pDev, pipe);

    paddedLength = DEV_CALC_SEND_PADDED_LEN(pDev, nbytes);
#ifdef ENABLE_MBOX_DUMMY_SPACE_FEATURE
    A_ASSERT(paddedLength - nbytes < HIF_DUMMY_SPACE_MASK + 1);
    /*
     * two most significant bytes to save dummy data count
     * data written into the dummy space will not put into the final mbox FIFO
     *
     */
    request |= ((paddedLength - nbytes) << 16);
#endif

    frag_count = adf_nbuf_get_num_frags(buf);

    if (frag_count > 1){
        /* The head data length is the total send length minus the internal
         * data length of the netbuf:
         *
         * | HIFSendContext | fragments except internal buffer | netbuf->data
         */
        head_data_len = sizeof(struct HIFSendContext) +
                (nbytes - adf_nbuf_get_frag_len(buf, frag_count - 1));
    } else {
        /*
         * | HIFSendContext | netbuf->data
         */
        head_data_len = sizeof(struct HIFSendContext);
    }

    /* Check whether head room is enough to save extra head data */
    if ((head_data_len <= adf_nbuf_headroom(buf)) &&
                (adf_nbuf_tailroom(buf) >= (paddedLength - nbytes))){
        pSendContext = (struct HIFSendContext*)adf_nbuf_push_head(buf, head_data_len);
        pSendContext->bNewAlloc = FALSE;
    } else {
        pSendContext = (struct HIFSendContext*)adf_os_mem_alloc(NULL,
                sizeof(struct HIFSendContext) + paddedLength);
        if (pSendContext == NULL) {
            return A_NO_MEMORY;
        }
        pSendContext->bNewAlloc = TRUE;
    }

    pSendContext->netbuf = buf;
    pSendContext->pDev = pDev;
    pSendContext->transferID = transferID;
    pSendContext->head_data_len = head_data_len;
    /*
     * Copy data to the head part of the netbuf, or to the head of the newly
     * allocated buffer. If the buffer was newly allocated, the last fragment
     * must be copied as well; it is assumed to be the netbuf's internal
     * buffer, and the total length of the fragments may exceed nbytes.
     */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    for (i = 0; i < (pSendContext->bNewAlloc ? frag_count : frag_count - 1); i ++){
        int frag_len = adf_nbuf_get_frag_len(buf, i);
        unsigned char *frag_addr = adf_nbuf_get_frag_vaddr(buf, i);
        if (frag_len > nbytes){
            frag_len = nbytes;
        }
        memcpy(pData, frag_addr, frag_len);
        pData += frag_len;
        nbytes -= frag_len;
        if (nbytes <= 0) {
            break;
        }
    }

    /* Reset pData pointer and send out */
    pData = (unsigned char *)pSendContext + sizeof(struct HIFSendContext);
    status = HIFReadWrite(pDev->HIFDevice,
            pDev->MailBoxInfo.MboxProp[mboxIndex].ExtendedAddress,
            (char*) pData,
            paddedLength,
            request,
            (void*)pSendContext);

    if (status == A_PENDING){
        /*
         * it will return A_PENDING in native HIF implementation,
         * which should be treated as successful result here.
         */
        status = A_OK;
    }
    /* release buffer or move back data pointer when failed */
    if (status != A_OK){
        if (pSendContext->bNewAlloc){
            adf_os_mem_free(pSendContext);
        } else {
            adf_nbuf_pull_head(buf, head_data_len);
        }
    }

    return status;
}
Example #30
static A_STATUS usb_hif_alloc_pipe_resources(HIF_USB_PIPE *pipe, int urb_cnt)
{
	A_STATUS status = A_OK;
	int i;
	HIF_URB_CONTEXT *urb_context;

	DL_LIST_INIT(&pipe->urb_list_head);
	DL_LIST_INIT(&pipe->urb_pending_list);

	for (i = 0; i < urb_cnt; i++) {
		urb_context = adf_os_mem_alloc(NULL, sizeof(*urb_context));
		if (NULL == urb_context) {
			status = A_NO_MEMORY;
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("urb_context is null\n"));
			break;
		}
		adf_os_mem_zero(urb_context, sizeof(HIF_URB_CONTEXT));
		urb_context->pipe = pipe;
		urb_context->urb = usb_alloc_urb(0, GFP_KERNEL);

		if (NULL == urb_context->urb) {
			status = A_NO_MEMORY;
			adf_os_mem_free(urb_context);
			AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
					("urb_context->urb is null\n"));
			break;
		}

		/* note: we only allocate the URB contexts here; the actual URB
		 * is allocated from the kernel as needed to do a transaction
		 */
		pipe->urb_alloc++;

		if (htc_bundle_send) {
			/* In tx bundle mode, only pre-allocate bundle buffers
			 * for data
			 * pipes
			 */
			if (pipe->logical_pipe_num >= HIF_TX_DATA_LP_PIPE &&
			    pipe->logical_pipe_num <= HIF_TX_DATA_HP_PIPE) {
				urb_context->buf = adf_nbuf_alloc(NULL,
						  HIF_USB_TX_BUNDLE_BUFFER_SIZE,
						  0, 4, FALSE);
				if (NULL == urb_context->buf) {
					status = A_NO_MEMORY;
					usb_free_urb(urb_context->urb);
					urb_context->urb = NULL;
					adf_os_mem_free(urb_context);
					AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (
					 "athusb: alloc send bundle buffer %d-byte failed\n",
					 HIF_USB_TX_BUNDLE_BUFFER_SIZE));
					break;
				}
			}
			skb_queue_head_init(&urb_context->comp_queue);
		}

		usb_hif_free_urb_to_pipe(pipe, urb_context);
	}

	AR_DEBUG_PRINTF(USB_HIF_DEBUG_ENUM, (
			 "athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
			 pipe->logical_pipe_num,
			 pipe->usb_pipe_handle,
			 pipe->urb_alloc));
	return status;
}