/* registered target arrival callback from the HIF layer.
 *
 * Allocates and initializes an HTC_TARGET instance bound to the given HIF
 * device, registers the HTC rx/tx completion handlers with HIF, and queries
 * the default pipe IDs for endpoint 0 (HTC control message exchange).
 *
 * Returns the new HTC_HANDLE, or NULL if allocation failed.
 */
HTC_HANDLE HTCCreate(void *hHIF, HTC_INIT_INFO *pInfo)
{
    A_STATUS status = A_OK;
    HTC_TARGET *target = NULL;

    do {
        if ((target = (HTC_TARGET *)A_MALLOC(sizeof(HTC_TARGET))) == NULL) {
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("HTC : Unable to allocate memory\n"));
            status = A_ERROR;
            break;
        }

        /* FIX: zero the target BEFORE saving the caller's init info.
         * The original code copied pInfo first and then zeroed the whole
         * structure, wiping out HTCInitInfo. */
        adf_os_mem_zero(target, sizeof(HTC_TARGET));
        A_MEMCPY(&target->HTCInitInfo, pInfo, sizeof(HTC_INIT_INFO));

        adf_os_spinlock_init(&target->HTCLock);
        adf_os_spinlock_init(&target->HTCRxLock);
        adf_os_spinlock_init(&target->HTCTxLock);

        /* setup HIF layer callbacks */
        adf_os_mem_zero(&htcCallbacks, sizeof(completion_callbacks_t));
        htcCallbacks.Context = target;
        htcCallbacks.rxCompletionHandler = HTCRxCompletionHandler;
        htcCallbacks.txCompletionHandler = HTCTxCompletionHandler;
        HIFPostInit(hHIF, target, &htcCallbacks);

        target->hif_dev = hHIF;

        /* hold the ready mutex until the target signals HTC-ready */
        adf_os_init_mutex(&target->htc_rdy_mutex);
        adf_os_mutex_acquire(&target->htc_rdy_mutex);

        /* Get HIF default pipe for HTC message exchange */
        HIFGetDefaultPipe(hHIF,
                          &target->EndPoint[ENDPOINT0].UL_PipeID,
                          &target->EndPoint[ENDPOINT0].DL_PipeID);
        adf_os_print("[Default Pipe]UL: %x, DL: %x\n",
                     target->EndPoint[ENDPOINT0].UL_PipeID,
                     target->EndPoint[ENDPOINT0].DL_PipeID);

        /* NOTE(review): htcInfo appears to be file-scope state; confirm this
         * should not read pInfo->pContext instead. */
        target->host_handle = htcInfo.pContext;
        /* TODO INTEGRATION supply from host os handle for any os specific calls */
        target->os_handle = NULL;

        /* TODO : other init */
    } while (FALSE);

    if (A_FAILED(status)) {
        /* FIX: the original dereferenced/cleaned up target unconditionally
         * on the failure path, but target is NULL when the allocation
         * itself failed. */
        if (target != NULL) {
            HTCCleanup(target);
        }
        target = NULL;
    }

    return (HTC_HANDLE)target;
}
/*
 * Allocate/free memory.
 *
 * Allocates 'size' bytes and returns the block zero-filled, or NULL if
 * the underlying allocator fails.
 */
void * __ahdecl
ath_hal_malloc(adf_os_size_t size)
{
    void *ptr = adf_os_mem_alloc(size);

    /* callers rely on zero-initialized memory */
    if (ptr != NULL)
        adf_os_mem_zero(ptr, size);
    return ptr;
}
//spinlock need free when unload static void *HIFInit(adf_os_handle_t os_hdl) { HIF_DEVICE_USB *hif_dev; /* allocate memory for HIF_DEVICE */ hif_dev = (HIF_DEVICE_USB *) adf_os_mem_alloc(os_hdl, sizeof(HIF_DEVICE_USB)); if (hif_dev == NULL) { return NULL; } adf_os_mem_zero(hif_dev, sizeof(HIF_DEVICE_USB)); hif_dev->os_hdl = os_hdl; return hif_dev; }
void __adf_nbuf_trace_update(struct sk_buff *buf, char *event_string) { char string_buf[NBUF_PKT_TRAC_MAX_STRING]; if ((!trace_update_cb) || (!event_string)) { return; } if (!adf_nbuf_trace_get_proto_type(buf)) { return; } /* Buffer over flow */ if (NBUF_PKT_TRAC_MAX_STRING <= (adf_os_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING)) { return; } adf_os_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING); adf_os_mem_copy(string_buf, event_string, adf_os_str_len(event_string)); if (NBUF_PKT_TRAC_TYPE_EAPOL & adf_nbuf_trace_get_proto_type(buf)) { adf_os_mem_copy(string_buf + adf_os_str_len(event_string), "EPL", NBUF_PKT_TRAC_PROTO_STRING); } else if (NBUF_PKT_TRAC_TYPE_DHCP & adf_nbuf_trace_get_proto_type(buf)) { adf_os_mem_copy(string_buf + adf_os_str_len(event_string), "DHC", NBUF_PKT_TRAC_PROTO_STRING); } else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION & adf_nbuf_trace_get_proto_type(buf)) { adf_os_mem_copy(string_buf + adf_os_str_len(event_string), "MACT", NBUF_PKT_TRAC_PROTO_STRING); } trace_update_cb(string_buf); return; }
void ieee80211_update_vap_target(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211vap_update_tgt tmp_vap_update; u_int8_t vapindex = 0; adf_os_mem_zero(&tmp_vap_update , sizeof(struct ieee80211vap_update_tgt)); /*searching vapindex */ for (vapindex = 0; vapindex < HTC_MAX_VAP_NUM; vapindex++) { if ((ic->target_vap_bitmap[vapindex].vap_valid) && (IEEE80211_ADDR_EQ(ic->target_vap_bitmap[vapindex].vap_macaddr,vap->iv_myaddr))) break; } tmp_vap_update.iv_flags = htonl(vap->iv_flags); tmp_vap_update.iv_flags_ext = htonl(vap->iv_flags_ext); tmp_vap_update.iv_vapindex = htonl(vapindex); tmp_vap_update.iv_rtsthreshold = htons(vap->iv_rtsthreshold); ic->ic_update_vap_target(ic, &tmp_vap_update, sizeof(tmp_vap_update)); }
/*
 * Initialize the HTC layer: allocate the HTC context, register send/recv
 * callbacks with HIF, size the control buffers, wire the pseudo control
 * service onto endpoint 0, pre-assemble setup buffers and start HIF.
 *
 * Returns the HTC handle, or NULL if the context cannot be allocated.
 */
LOCAL htc_handle_t _HTC_Init(HTC_SETUP_COMPLETE_CB SetupComplete,
                             HTC_CONFIG *pConfig)
{
    HIF_CALLBACK hifCBConfig;
    HTC_CONTEXT *pHTC;

    pHTC = (HTC_CONTEXT *)adf_os_mem_alloc(sizeof(HTC_CONTEXT));
    /* FIX: the original zeroed and used the allocation without a NULL check */
    if (pHTC == NULL) {
        return NULL;
    }
    adf_os_mem_zero(pHTC, sizeof(HTC_CONTEXT));

    pHTC->OSHandle = pConfig->OSHandle;
    pHTC->PoolHandle = pConfig->PoolHandle;
    pHTC->hifHandle = pConfig->HIFHandle;

    hifCBConfig.send_buf_done = A_INDIR(htc._HTC_SendDoneHandler);
    hifCBConfig.recv_buf = A_INDIR(htc._HTC_MsgRecvHandler);
    hifCBConfig.context = pHTC;

    /* initialize hardware layer */
    HIF_register_callback(pConfig->HIFHandle, &hifCBConfig);

    /* see if the host wants us to override the number of ctrl buffers */
    pHTC->NumBuffersForCreditRpts = 0;
    if (0 == pHTC->NumBuffersForCreditRpts) {
        /* nothing to override, simply set default */
        pHTC->NumBuffersForCreditRpts = HTC_DEFAULT_NUM_CTRL_BUFFERS;
    }

    pHTC->MaxEpPendingCreditRpts = 0;
    if (0 == pHTC->MaxEpPendingCreditRpts) {
        pHTC->MaxEpPendingCreditRpts = HTC_DEFAULT_MAX_EP_PENDING_CREDIT_REPORTS;
    }

    /* calculate the total allocation size based on the number of credit
     * report buffers */
    pHTC->CtrlBufferAllocSize =
        MIN_CREDIT_BUFFER_ALLOC_SIZE * pHTC->NumBuffersForCreditRpts;
    /* we need at least enough buffer space for 1 ctrl message */
    pHTC->CtrlBufferAllocSize =
        A_MAX(pHTC->CtrlBufferAllocSize, MAX_HTC_SETUP_MSG_SIZE);

    /* save the size of each buffer/credit we will receive */
    pHTC->RecvBufferSize = pConfig->CreditSize; //RecvBufferSize;
    pHTC->TotalCredits = pConfig->CreditNumber;
    pHTC->TotalCreditsAssigned = 0;

    /* setup the pseudo service that handles HTC control messages */
    pHTC->HTCControlService.ProcessRecvMsg =
        A_INDIR(htc._HTC_ControlSvcProcessMsg);
    pHTC->HTCControlService.ProcessSendBufferComplete =
        A_INDIR(htc._HTC_ControlSvcProcessSendComplete);
    pHTC->HTCControlService.TrailerSpcCheckLimit = HTC_CTRL_BUFFER_CHECK_SIZE;
    pHTC->HTCControlService.MaxSvcMsgSize = MAX_HTC_SETUP_MSG_SIZE;
    pHTC->HTCControlService.ServiceCtx = pHTC;

    /* automatically register this pseudo service to endpoint 1 */
    pHTC->Endpoints[ENDPOINT0].pService = &pHTC->HTCControlService;
    HIF_get_default_pipe(pHTC->hifHandle,
                         &pHTC->Endpoints[ENDPOINT0].UpLinkPipeID,
                         &pHTC->Endpoints[ENDPOINT0].DownLinkPipeID);

    /* Initialize control pipe so we could receive the HTC control packets */
    // @TODO: msg size!
    HIF_config_pipe(pHTC->hifHandle,
                    pHTC->Endpoints[ENDPOINT0].UpLinkPipeID, 1);

    /* set the first free endpoint */
    pHTC->CurrentEpIndex = ENDPOINT1;
    pHTC->SetupCompleteCb = SetupComplete;

    /* setup buffers for just the setup phase; 4 buffers are pre-assembled
     * even though setup itself only needs 1 */
    HTC_AssembleBuffers(pHTC, 4, MAX_HTC_SETUP_MSG_SIZE);

    /* start hardware layer so that we can queue buffers */
    HIF_start(pHTC->hifHandle);

    return pHTC;
}
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; unsigned int tx_buffer_count_pwr2; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; uint16_t idx; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); break; } /* Init buffer */ 
adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } /* * Tx complete ring buffer count should be power of 2. * So, allocated Tx buffer count should be one less than ring buffer size. */ tx_buffer_count_pwr2 = vos_rounddown_pow_of_two(tx_buffer_count + 1) - 1; if (tx_buffer_count > tx_buffer_count_pwr2) { adf_os_print("%s: Allocated Tx buffer count %d is rounded down to %d", __func__, tx_buffer_count, tx_buffer_count_pwr2); /* Free over allocated buffers below power of 2 */ for(idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) { if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) { adf_nbuf_unmap(pdev->osdev, pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx], ADF_OS_DMA_FROM_DEVICE); adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]); } } } if (tx_buffer_count_pwr2 < 0) { adf_os_print("%s: Failed to round down Tx buffer count %d", __func__, tx_buffer_count_pwr2); goto free_tx_comp_base; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count_pwr2; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, 
pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
/*
 * Board bring-up for the Magpie firmware: initializes the VBUF/VDESC
 * pools and, when WLAN is disabled, brings up HIF plus either the USB
 * loopback pipes or the HTC/WMI stack (depending on ZM_FM_LOOPBACK).
 * Relies on file-scope state (hif_handle, htcConf, wmiConfig, ...).
 */
void Magpie_init(void)
{
    A_PRINTF("[+++Magpie_init]\n\r");

    A_PRINTF("[+++VBUF_init(%d)]\n\r", MAX_BUF_NUM);
    VBUF_init(MAX_BUF_NUM);

    /* NOTE(review): log text says "VBUF" but this initializes the VDESC
     * pool — looks like a copy/paste slip in the message. */
    A_PRINTF("[+++VBUF_init(%d)]\n\r", MAX_DESC_NUM);
    VDESC_init(MAX_DESC_NUM);

#if MAGPIE_ENABLE_WLAN == 0
    /* NOTE(review): the stray token below is not valid C and breaks the
     * build whenever this branch is compiled — possibly a deliberate
     * build-breaker, possibly corruption.  TODO confirm; if deliberate,
     * replace with an explicit #error directive. */
    aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa

    hif_handle = HIF_init(0);

#if ZM_FM_LOOPBACK == 1
    /* loopback mode: configure the USB pipes directly, no HTC/WMI */
    HIF_config_pipe(hif_handle, HIF_USB_PIPE_TX, 5);
    HIF_config_pipe(hif_handle, HIF_USB_PIPE_COMMAND, 2);
#if SYSTEM_MODULE_HP_EP5
    HIF_config_pipe(hif_handle, HIF_USB_PIPE_HP_TX, 3);
#endif
#if SYSTEM_MODULE_HP_EP6
    HIF_config_pipe(hif_handle, HIF_USB_PIPE_MP_TX, 3);
#endif
    A_PRINTF("[+++HIF_init(0)]\n\r");
    HIF_start(hif_handle);
#else /* ZM_FM_LOOPBACK == 0 */
    // initialize HTC
    htcConf.CreditSize = 320;
    htcConf.CreditNumber = 10;
#if 1
    htcConf.ControlDownLinkPipeID = HIF_USB_PIPE_INTERRUPT; // Target -> Host
    htcConf.ControlUpLinkPipeID = HIF_USB_PIPE_COMMAND;     // Host -> Target
#else
    htcConf.ControlDownLinkPipeID = HIF_USB_PIPE_RX;
    htcConf.ControlUpLinkPipeID = HIF_USB_PIPE_TX;
#endif
    htcConf.HIFHandle = hif_handle;
    htcConf.OSHandle = 0;  // not used
    htcConf.PoolHandle = pool_handle;

    htc_handle = HTC_init(htc_setup_comp, &htcConf);

    // Initialize HTC services
    HTC_Loopback_Init(htc_handle);

    /* stack WMI on top of HTC and register the system command table */
    adf_os_mem_zero(&wmiConfig, sizeof(WMI_SVC_CONFIG));
    wmiConfig.HtcHandle = htc_handle;
    wmiConfig.PoolHandle = pool_handle;
    wmiConfig.MaxCmdReplyEvts = 1;
    wmiConfig.MaxEventEvts = 1;
    wmi_handle = WMI_Init(&wmiConfig);

    Magpie_Sys_Commands_Tbl.pContext = wmi_handle;
    WMI_RegisterDispatchTable(Magpie_Sys_Commands_Tbl.pContext,
                              &Magpie_Sys_Commands_Tbl);
#endif /* ZM_FM_LOOPBACK == 0 */
#endif /* MAGPIE_ENABLE_WLAN */
}
int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int uc_tx_buf_sz, unsigned int uc_tx_buf_cnt, unsigned int uc_tx_partition_base) { unsigned int tx_buffer_count; adf_nbuf_t buffer_vaddr; u_int32_t buffer_paddr; u_int32_t *header_ptr; u_int32_t *ring_vaddr; int return_code = 0; /* Allocate CE Write Index WORD */ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, 4, &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) { adf_os_print("%s: CE Write Index WORD alloc fail", __func__); return -1; } /* Allocate TX COMP Ring */ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr = adf_os_mem_alloc_consistent(pdev->osdev, uc_tx_buf_cnt * 4, &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) { adf_os_print("%s: TX COMP ring alloc fail", __func__); return_code = -2; goto free_tx_ce_idx; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4); /* Allocate TX BUF vAddress Storage */ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg = (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) { adf_os_print("%s: TX BUF POOL vaddr storage alloc fail", __func__); return_code = -3; goto free_tx_comp_base; } adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg, uc_tx_buf_cnt * sizeof(adf_nbuf_t)); ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr; /* Allocate TX buffers as many as possible */ for (tx_buffer_count = 0; tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) { buffer_vaddr = adf_nbuf_alloc(pdev->osdev, uc_tx_buf_sz, 0, 4, FALSE); if (!buffer_vaddr) { adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d", __func__, tx_buffer_count); return 0; } /* Init buffer */ adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz); header_ptr = 
(u_int32_t *)adf_nbuf_data(buffer_vaddr); *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT; header_ptr++; *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16; adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL); buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0); header_ptr++; *header_ptr = (u_int32_t)(buffer_paddr + 16); header_ptr++; *header_ptr = 0xFFFFFFFF; /* FRAG Header */ header_ptr++; *header_ptr = buffer_paddr + 32; *ring_vaddr = buffer_paddr; pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] = buffer_vaddr; /* Memory barrier to ensure actual value updated */ ring_vaddr++; } pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count; return 0; free_tx_comp_base: adf_os_mem_free_consistent(pdev->osdev, ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4, pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, pdev->ipa_uc_tx_rsc.tx_comp_base.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx)); free_tx_ce_idx: adf_os_mem_free_consistent(pdev->osdev, 4, pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr, pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr, adf_os_get_dma_mem_context( (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx)); return return_code; }
/*
 * Pre-allocate URB contexts for a pipe and park them on its free list.
 * In TX-bundle mode, data pipes additionally get a pre-allocated bundle
 * buffer and an initialized completion queue per context.
 *
 * Allocation stops at the first failure; whatever was allocated so far
 * remains on the pipe (pipe->urb_alloc reflects it).  Returns A_OK or
 * A_NO_MEMORY.
 */
static A_STATUS usb_hif_alloc_pipe_resources(HIF_USB_PIPE *pipe, int urb_cnt)
{
    A_STATUS status = A_OK;
    HIF_URB_CONTEXT *ctx;
    int idx;

    DL_LIST_INIT(&pipe->urb_list_head);
    DL_LIST_INIT(&pipe->urb_pending_list);

    for (idx = 0; idx < urb_cnt; idx++) {
        ctx = adf_os_mem_alloc(NULL, sizeof(*ctx));
        if (ctx == NULL) {
            status = A_NO_MEMORY;
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("urb_context is null\n"));
            break;
        }
        adf_os_mem_zero(ctx, sizeof(HIF_URB_CONTEXT));
        ctx->pipe = pipe;

        ctx->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (ctx->urb == NULL) {
            status = A_NO_MEMORY;
            adf_os_mem_free(ctx);
            AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("urb_context->urb is null\n"));
            break;
        }

        /* note we are only allocate the urb contexts here, the actual
         * URB is allocated from the kernel as needed to do a transaction */
        pipe->urb_alloc++;

        if (htc_bundle_send) {
            /* In tx bundle mode, only pre-allocate bundle buffers for
             * data pipes */
            if ((pipe->logical_pipe_num >= HIF_TX_DATA_LP_PIPE) &&
                (pipe->logical_pipe_num <= HIF_TX_DATA_HP_PIPE)) {
                ctx->buf = adf_nbuf_alloc(NULL,
                                          HIF_USB_TX_BUNDLE_BUFFER_SIZE,
                                          0, 4, FALSE);
                if (ctx->buf == NULL) {
                    status = A_NO_MEMORY;
                    usb_free_urb(ctx->urb);
                    ctx->urb = NULL;
                    adf_os_mem_free(ctx);
                    AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (
                        "athusb: alloc send bundle buffer %d-byte failed\n",
                        HIF_USB_TX_BUNDLE_BUFFER_SIZE));
                    break;
                }
            }
            skb_queue_head_init(&ctx->comp_queue);
        }

        usb_hif_free_urb_to_pipe(pipe, ctx);
    }

    AR_DEBUG_PRINTF(USB_HIF_DEBUG_ENUM, (
        "athusb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
        pipe->logical_pipe_num, pipe->usb_pipe_handle, pipe->urb_alloc));

    return status;
}