static void rpc_dbus_state_change(void *handle, int state) { rpc_tp_info_t *rpcb = handle; if (rpcb == NULL) return; /* FIX: DBUS is down, need to do something? */ if (state == DBUS_STATE_DOWN) { RPC_TP_ERR(("%s: DBUS is down\n", __FUNCTION__)); } }
/*
 * Allocate and initialize the dongle-side RPC transport instance.
 *
 * osh: OS abstraction handle used for all allocations.
 * bus: opaque bus handle, stored as the hndrte device context.
 *
 * Returns the new transport handle, or NULL if any allocation fails
 * (partially built state is freed on failure).
 */
rpc_tp_info_t *
BCMATTACHFN(bcm_rpc_tp_attach)(osl_t * osh, void *bus)
{
	rpc_tp_info_t *rpc_th;

	rpc_th = (rpc_tp_info_t *)MALLOC(osh, sizeof(rpc_tp_info_t));
	if (rpc_th == NULL) {
		RPC_TP_ERR(("%s: rpc_tp_info_t malloc failed\n", __FUNCTION__));
		return NULL;
	}

	memset(rpc_th, 0, sizeof(rpc_tp_info_t));
	rpc_th->osh = osh;
	rpc_th->ctx = (hndrte_dev_t *)bus;

	/* Flow control starts disabled with nothing queued */
	rpc_th->tx_flowctl = FALSE;
	rpc_th->tx_q_flowctl_segcnt = 0;

	rpc_th->tx_flowctlq = (struct spktq *)MALLOC(osh, sizeof(struct spktq));
	if (rpc_th->tx_flowctlq == NULL) {
		RPC_TP_ERR(("%s: txflowctlq malloc failed\n", __FUNCTION__));
		MFREE(rpc_th->osh, rpc_th, sizeof(rpc_tp_info_t));
		return NULL;
	}
	pktqinit(rpc_th->tx_flowctlq, BCM_RPC_TP_Q_MAX);

	/* Watermarks at which host throttling is asserted/released */
	rpc_th->tx_q_flowctl_hiwm = BCM_RPC_TP_FLOWCTL_QWM_HIGH;
	rpc_th->tx_q_flowctl_lowm = BCM_RPC_TP_FLOWCTL_QWM_LOW;

	/* Dongle->host aggregation limits */
	rpc_th->tp_dngl_agg_lazy = 0;
	rpc_th->tp_dngl_agg_sframes_limit = BCM_RPC_TP_DNGL_AGG_MAX_SFRAME;
	rpc_th->tp_dngl_agg_bytes_max = BCM_RPC_TP_DNGL_AGG_MAX_BYTE;

#ifdef BCMUSBDEV_EP_FOR_RPCRETURN
	rpc_th->has_2nd_bulk_in_ep = 1;
#endif /* BCMUSBDEV_EP_FOR_RPCRETURN */

	return rpc_th;
}
/*
 * Pad (if needed) and transmit one TP buffer through the chained bus
 * device's data endpoint.
 *
 * rpcb:        transport instance.
 * b:           TP-encapsulated buffer; ownership passes to the bus driver
 *              on success, and is freed here on xmit failure.
 * tx_ep_index: endpoint selector (not consulted on this path; kept for
 *              interface symmetry with callers).
 *
 * Returns 0 on success or the bus driver's xmit error code.
 */
static int
bcm_rpc_tp_buf_send_internal(rpc_tp_info_t * rpcb, rpc_buf_t *b, uint32 tx_ep_index)
{
	int err;
	struct lbuf *lb = (struct lbuf *)b;
	hndrte_dev_t *chained = rpcb->ctx->chained;
	uint pktlen;
	uint segcnt;

	ASSERT(chained);
	ASSERT(b != NULL);

	pktlen = bcm_rpc_buf_totlen_get(rpcb, b);
	if (pktlen == BCM_RPC_TP_DNGL_TOTLEN_BAD) {
		/* known-bad total length: pad to a safe size */
		RPC_TP_AGG(("%s, pkt is %d bytes, padding %d bytes\n", __FUNCTION__,
			BCM_RPC_TP_DNGL_TOTLEN_BAD, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD));
		bcm_rpc_tp_buf_pad(rpcb, b, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD);
	} else if (pktlen % BCM_RPC_TP_DNGL_BULKEP_MPS == 0) {
		/* avoid an exact multiple of the bulk EP max packet size,
		 * which would otherwise require a zero-length packet
		 */
		RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
			__FUNCTION__, BCM_RPC_TP_DNGL_BULKEP_MPS, BCM_RPC_TP_DNGL_ZLP_PAD));
		bcm_rpc_tp_buf_pad(rpcb, b, BCM_RPC_TP_DNGL_ZLP_PAD);
	}

	/* FIX: snapshot the segment count BEFORE handing the packet to the bus
	 * driver; once xmit() succeeds the driver owns (and may free) the packet,
	 * so reading pktsegcnt(b) afterwards is a potential use-after-free.
	 */
	segcnt = pktsegcnt(rpcb->osh, b);

	lb = PKTTONATIVE(rpcb->osh, b);

	/* send through data endpoint */
	if ((err = chained->funcs->xmit(rpcb->ctx, chained, lb)) != 0) {
		RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__, lb));
		rpcb->txerr_cnt++;
		/* NOTE(review): buf_cnt_inuse is not decremented on this failure
		 * path even though lb_free() releases the buffer -- confirm the
		 * accounting is handled by the caller
		 */
		lb_free(lb);
	} else {
		rpcb->tx_cnt++;
		/* give pkt ownership to usb driver, decrement the counter */
		rpcb->buf_cnt_inuse -= segcnt;
	}

	return err;
}
/*
 * Encapsulate and transmit an RPC call-return buffer to the host,
 * via the second bulk-IN endpoint when available, otherwise via the
 * control endpoint.
 *
 * rpc_th: transport instance.
 * b:      raw return buffer; TP encapsulation is added here.  Ownership
 *         passes to the bus driver on success; freed here on failure.
 *
 * Returns 0 on success or the bus driver's xmit error code.
 */
int
bcm_rpc_tp_send_callreturn(rpc_tp_info_t * rpc_th, rpc_buf_t *b)
{
	int err, pktlen;
	uint segcnt;
	struct lbuf *lb;
	hndrte_dev_t *chained = rpc_th->ctx->chained;

	ASSERT(chained);

	/* Add the TP encapsulation */
	bcm_rpc_tp_tx_encap(rpc_th, b);

	/* Pad if pkt size is a multiple of MPS (avoids needing a USB ZLP) */
	pktlen = bcm_rpc_buf_totlen_get(rpc_th, b);
	if (pktlen % BCM_RPC_TP_DNGL_CTRLEP_MPS == 0) {
		RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
			__FUNCTION__, BCM_RPC_TP_DNGL_CTRLEP_MPS, BCM_RPC_TP_DNGL_ZLP_PAD));
		bcm_rpc_tp_buf_pad(rpc_th, b, BCM_RPC_TP_DNGL_ZLP_PAD);
	}

	/* FIX: snapshot the segment count BEFORE handing the packet to the bus
	 * driver; once xmit succeeds the driver owns (and may free) the packet,
	 * so reading pktsegcnt(b) afterwards is a potential use-after-free.
	 */
	segcnt = pktsegcnt(rpc_th->osh, b);

	lb = PKTTONATIVE(rpc_th->osh, b);

	if (rpc_th->has_2nd_bulk_in_ep) {
		err = chained->funcs->xmit2(rpc_th->ctx, chained, lb, USBDEV_BULK_IN_EP2);
	} else {
		err = chained->funcs->xmit_ctl(rpc_th->ctx, chained, lb);
	}

	/* send through control endpoint */
	if (err != 0) {
		RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__, lb));
		rpc_th->txerr_cnt++;
		lb_free(lb);
	} else {
		rpc_th->tx_cnt++;
		/* give pkt ownership to usb driver, decrement the counter */
		rpc_th->buf_cnt_inuse -= segcnt;
	}

	return err;
}
/*
 * Queue a TP buffer while transmit flow control is engaged.  Crossing
 * the high watermark (in segments) throttles the wl driver via the
 * registered flow-control callback.
 */
static void
bcm_rpc_tp_buf_send_enq(rpc_tp_info_t * rpc_th, rpc_buf_t *b)
{
	pktenq(rpc_th->tx_flowctlq, (void *)b);
	rpc_th->tx_q_flowctl_segcnt += pktsegcnt(rpc_th->osh, b);

	/* if hiwm is reached, throttle wldriver
	 * TODO, count more(average 3?) if agg is ON
	 */
	if (rpc_th->tx_q_flowctl_segcnt > rpc_th->tx_q_flowctl_hiwm) {
		rpc_th->tx_q_flowctl_highwm_cnt++;
		RPC_TP_ERR(("bcm_rpc_tp_buf_send_enq, wm hit high!\n"));
		rpc_th->txflowctl_cb(rpc_th->txflowctl_ctx, ON);
	}

	/* If tx_flowctlq gets full, set a bigger BCM_RPC_TP_Q_MAX */
	ASSERT(!pktq_full(rpc_th->tx_flowctlq));
}
static void rpc_dbus_ctl_complete(void *handle, int type, int status) { rpc_tp_info_t *rpcb = (rpc_tp_info_t *)handle; void *pkt; RPC_TP_LOCK(rpcb); pkt = rpcb->rx_rtn_pkt; rpcb->rx_rtn_pkt = NULL; RPC_TP_UNLOCK(rpcb); if (!status) { bcm_rpc_buf_pull(rpcb, pkt, BCM_RPC_TP_ENCAP_LEN); (rpcb->rx_pkt)(rpcb->rx_context, pkt); } else { RPC_TP_ERR(("%s: no rpc rx ctl, dropping 0x%x\n", __FUNCTION__, status)); bcm_rpc_tp_pktfree(rpcb, pkt, TRUE); } }
/*
 * Close out the aggregation chain under construction (if any) and hand
 * it to the bus, or enqueue it when tx flow control is engaged.
 * Returns 0 when nothing was pending or the send/enqueue succeeded.
 */
static int
bcm_rpc_tp_dngl_agg_release(rpc_tp_info_t * rpcb)
{
	rpc_buf_t *agg_b = rpcb->tp_dngl_agg_p;
	int err = 0;

	if (agg_b == NULL)
		return 0;	/* no aggregation formed */

	RPC_TP_AGG(("%s, send %d, sframe %d\n", __FUNCTION__,
		rpcb->tp_dngl_agg_bytes, rpcb->tp_dngl_agg_sframes));

	/* update stats before the per-chain state is reset below */
	rpcb->tp_dngl_agg_cnt_chain++;
	rpcb->tp_dngl_agg_cnt_sf += rpcb->tp_dngl_agg_sframes;
	rpcb->tp_dngl_agg_cnt_bytes += rpcb->tp_dngl_agg_bytes;
	if (rpcb->tp_dngl_agg_sframes == 1)
		rpcb->tp_dngl_agg_cnt_noagg++;

	bcm_rpc_tp_dngl_agg_initstate(rpcb);

	rpcb->tp_dngl_agg_txpending++;

	if (rpcb->tx_flowctl)
		bcm_rpc_tp_buf_send_enq(rpcb, agg_b);
	else
		err = bcm_rpc_tp_buf_send_internal(rpcb, agg_b, USBDEV_BULK_IN_EP1);

	if (err != 0) {
		RPC_TP_ERR(("bcm_rpc_tp_dngl_agg_release: send err!!!\n"));
		/* ASSERT(0) */
	}

	return err;
}
/*
 * De-aggregate a TP frame received from the dongle bus into individual
 * RPC frames and deliver each to the registered rx handler.
 *
 * A TP frame is a chain of buffers; each RPC subframe starts with a
 * 4-byte little-endian length word (BCM_RPC_TP_ENCAP_LEN).  A subframe
 * may span several chained buffers, or several subframes may share one
 * buffer (in which case the tail is cloned so each RPC frame is an
 * independent chain).
 *
 * rpc_th: transport instance (rx_pkt handler must be registered).
 * lb:     native buffer chain from the bus; ownership is taken here and
 *         released via the per-segment buf_cnt_inuse accounting.
 */
void
bcm_rpc_tp_rx_from_dnglbus(rpc_tp_info_t *rpc_th, struct lbuf *lb)
{
	void *orig_p, *p;
	void *rpc_p, *rpc_prev;
	uint pktlen, tp_len, iter = 0;
	osl_t *osh;
	bool dbg_agg;
	uint dbg_data[16], i;	/* must fit host agg limit BCM_RPC_TP_HOST_AGG_MAX_SFRAME+1 */

	dbg_agg = FALSE;

	rpc_th->rx_cnt++;

	/* no receive handler registered: drop the whole TP frame */
	if (rpc_th->rx_pkt == NULL) {
		RPC_TP_ERR(("%s: no rpc rx fn, dropping\n", __FUNCTION__));
		rpc_th->rxdrop_cnt++;
		lb_free(lb);
		return;
	}
	orig_p = PKTFRMNATIVE(rpc_th->osh, lb);

	osh = rpc_th->osh;

	/* take ownership of the dnglbus packet chain
	 * since it will be freed by bcm_rpc_tp_buf_free()
	 */
	rpc_th->buf_cnt_inuse += pktsegcnt(rpc_th->osh, orig_p);

	/* dbg_data[0] records the original segment count; [1..iter] record
	 * each subframe's length, for the toss-dump below
	 */
	dbg_data[0] = pktsegcnt(rpc_th->osh, orig_p);

	pktlen = PKTLEN(osh, orig_p);

	p = orig_p;

	/* while we have more data in the TP frame's packet chain,
	 * create a packet chain(could be cloned) for the next RPC frame
	 * then give it away to high layer for process(buffer not freed)
	 */
	while (p != NULL) {
		iter++;

		/* read TP_HDR(len of rpc frame) and pull the data pointer past the length word */
		if (pktlen >= BCM_RPC_TP_ENCAP_LEN) {
			ASSERT(((uint)PKTDATA(osh, p) & 0x3) == 0);	/* ensure aligned word read */
			tp_len = ltoh32(*(uint32*)PKTDATA(osh, p));
			PKTPULL(osh, p, BCM_RPC_TP_ENCAP_LEN);
			pktlen -= BCM_RPC_TP_ENCAP_LEN;
		} else {
			/* error case: less data than the encapsulation size
			 * treat as an empty tp buffer, at end of current buffer
			 */
			tp_len = 0;
			pktlen = 0;
			rpc_th->tp_dngl_deagg_cnt_badsflen++;	/* bad sf len */
		}

		/* if TP header finished a buffer(rpc header in next chained buffer), open next */
		if (pktlen == 0) {
			void *next_p = PKTNEXT(osh, p);
			PKTSETNEXT(osh, p, NULL);
			rpc_th->buf_cnt_inuse--;
			PKTFREE(osh, p, FALSE);
			p = next_p;
			if (p)
				pktlen = PKTLEN(osh, p);
		}

		/* NOTE(review): iter indexes dbg_data[16] unchecked -- relies on the
		 * host never aggregating more than BCM_RPC_TP_HOST_AGG_MAX_SFRAME+1
		 * subframes per TP frame; confirm the limit
		 */
		dbg_data[iter] = tp_len;

		/* subframe shorter than the remaining buffer => aggregation present */
		if (tp_len < pktlen || dbg_agg) {
			dbg_agg = TRUE;
			RPC_TP_DEAGG(("DEAGG: [%d] p %p data %p pktlen %d tp_len %d\n",
				iter, p, PKTDATA(osh, p), pktlen, tp_len));
			rpc_th->tp_dngl_deagg_cnt_sf++;
			rpc_th->tp_dngl_deagg_cnt_bytes += tp_len;
		}

		/* empty TP buffer (special case: use tp_len to pad for some USB pktsize bugs) */
		if (tp_len == 0) {
			rpc_th->tp_dngl_deagg_cnt_pass++;
			continue;
		} else if (tp_len > 10000 ) {	/* something is wrong */
			/* print out msgs according to value of p -- in case it is NULL */
			if (p != NULL) {
				RPC_TP_ERR(("DEAGG: iter %d, p(%p data %p pktlen %d)\n",
					iter, p, PKTDATA(osh, p), PKTLEN(osh, p)));
			} else {
				RPC_TP_ERR(("DEAGG: iter %d, p is NULL", iter));
			}
		}

		/* ========= For this TP subframe, find the end, build a chain, sendup ========= */

		/* RPC frame packet chain starts with this packet */
		rpc_prev = NULL;
		rpc_p = p;
		ASSERT(p != NULL);

		/* find the last frag in this rpc chain */
		while ((tp_len >= pktlen) && p) {
			if (dbg_agg)
				RPC_TP_DEAGG(("DEAGG: tp_len %d consumes p(%p pktlen %d)\n",
					tp_len, p, pktlen));
			rpc_prev = p;
			p = PKTNEXT(osh, p);
			tp_len -= pktlen;

			if (p != NULL) {
				pktlen = PKTLEN(osh, p);
			} else {
				/* chain ran out with tp_len bytes still owed:
				 * malformed frame -- dump diagnostics and toss it
				 */
				if (tp_len != 0) {
					uint totlen, seg;
					totlen = pkttotlen(osh, rpc_p);
					seg = pktsegcnt(rpc_th->osh, rpc_p);

					RPC_TP_ERR(("DEAGG, toss[%d], orig_p %p segcnt %d",
						iter, orig_p, dbg_data[0]));
					RPC_TP_ERR(("DEAGG,rpc_p %p totlen %d pktl %d tp_len %d\n",
						rpc_p, totlen, pktlen, tp_len));
					for (i = 1; i <= iter; i++)
						RPC_TP_ERR(("tplen[%d] = %d ", i, dbg_data[i]));
					RPC_TP_ERR(("\n"));
					p = rpc_p;
					while (p != NULL) {
						RPC_TP_ERR(("this seg len %d\n", PKTLEN(osh, p)));
						p = PKTNEXT(osh, p);
					}

					rpc_th->buf_cnt_inuse -= seg;
					PKTFREE(osh, rpc_p, FALSE);
					rpc_th->tp_dngl_deagg_cnt_badfmt++;

					/* big hammer to recover USB
					 * extern void dngl_reboot(void); dngl_reboot();
					 */
					goto end;
				}
				pktlen = 0;
				break;
			}
		}

		/* fix up the last frag */
		if (tp_len == 0) {
			/* if the whole RPC buffer chain ended at the end of the prev TP buffer,
			 * end the RPC buffer chain. we are done
			 */
			if (dbg_agg)
				RPC_TP_DEAGG(("DEAGG: END rpc chain p %p len %d\n\n",
					rpc_prev, pktlen));
			PKTSETNEXT(osh, rpc_prev, NULL);
			if (iter > 1) {
				rpc_th->tp_dngl_deagg_cnt_chain++;
				RPC_TP_DEAGG(("this frag %d totlen %d\n",
					pktlen, pkttotlen(osh, orig_p)));
			}
		} else {
			/* if pktlen has more bytes than tp_len, another tp frame must follow
			 * create a clone of the sub-range of the current TP buffer covered
			 * by the RPC buffer, attach to the end of the RPC buffer chain
			 * (cut off the original chain link)
			 * continue chain looping(p != NULL)
			 */
			void *new_p;
			ASSERT(p != NULL);

			RPC_TP_DEAGG(("DEAGG: cloning %d bytes out of p(%p data %p) len %d\n",
				tp_len, p, PKTDATA(osh, p), pktlen));

			new_p = osl_pktclone(osh, p, 0, tp_len);
			rpc_th->buf_cnt_inuse++;
			rpc_th->tp_dngl_deagg_cnt_clone++;

			RPC_TP_DEAGG(("DEAGG: after clone, newp(%p data %p pktlen %d)\n",
				new_p, PKTDATA(osh, new_p), PKTLEN(osh, new_p)));

			if (rpc_prev) {
				RPC_TP_DEAGG(("DEAGG: chaining: %p->%p(clone)\n",
					rpc_prev, new_p));
				PKTSETNEXT(osh, rpc_prev, new_p);
			} else {
				RPC_TP_DEAGG(("DEAGG: clone %p is a complete rpc pkt\n", new_p));
				rpc_p = new_p;
			}

			/* advance the shared buffer past the cloned sub-range */
			PKTPULL(osh, p, tp_len);
			pktlen -= tp_len;
			RPC_TP_DEAGG(("DEAGG: remainder packet p %p data %p pktlen %d\n",
				p, PKTDATA(osh, p), PKTLEN(osh, p)));
		}

		/* !! send up */
		(rpc_th->rx_pkt)(rpc_th->rx_context, rpc_p);
	}

end:
	ASSERT(p == NULL);
}
/*
 * Free an RPC transport buffer (possibly a chain) and update the
 * in-use accounting.  On the non-NDIS (Linux) path, dropping below
 * RPCRX_WM_LO releases rx flow control on the bus.
 *
 * rpcb: transport instance.
 * b:    buffer (chain) to free.
 * send: tx vs rx origin -- not consulted on either path here; presumably
 *       kept for interface symmetry with bcm_rpc_tp_pktget -- TODO confirm.
 */
static void BCMFASTPATH
bcm_rpc_tp_pktfree(rpc_tp_info_t * rpcb, rpc_buf_t *b, bool send)
{
	uint32 free_cnt = 0;	/* number of segments released, for accounting */

#if defined(NDIS)
	struct lbuf *lb = (struct lbuf*)b;
	struct lbuf *next;

	ASSERT(rpcb);
	ASSERT(lb != NULL);

	/* walk the chain, returning each lbuf to its shared pool */
	do {
		next = lb->next;
		lb->next = NULL;
		ASSERT(lb->p == NULL);
		shared_lb_put(rpcb->sh, lb->l, lb);
		free_cnt++;
		lb = next;
	} while (lb);
#else
	struct sk_buff *skb = (struct sk_buff*)b, *next;

#if defined(CTFPOOL)
	/* count the chain first; PKTFREE releases the whole chain at once */
	next = skb;
	while (next != NULL) {
		next = next->next;
		free_cnt++;
	}
	PKTFREE(rpcb->osh, skb, FALSE);
#else
	while (skb) {
		next = skb->next;
		if (skb->destructor) {
			/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if destructor exists */
			dev_kfree_skb_any(skb);
		} else {
			/* can free immediately (even in_irq()) if destructor does not exist */
			dev_kfree_skb(skb);
		}
		skb = next;
		free_cnt++;
	}
#endif /* defined(CTFPOOL) */

	/* account the freed segments and release rx flow control if we
	 * dropped back below the low watermark
	 */
	RPC_TP_LOCK(rpcb);
	rpcb->buf_cnt_inuse -= free_cnt;
	if (rpcb->rxflowctrl && (rpcb->buf_cnt_inuse < RPCRX_WM_LO)) {
		rpcb->rxflowctrl = FALSE;
		RPC_TP_ERR(("%s, rxflowctrl change to %d\n", __FUNCTION__, rpcb->rxflowctrl));
		dbus_flowctrl_rx(rpcb->bus, FALSE);
	}
	RPC_TP_UNLOCK(rpcb);
#endif /* NDIS */
}
/* internal pkt allocation, no BCM_RPC_TP_ENCAP_LEN */
/*
 * Allocate an RPC transport buffer of `len` bytes.
 *
 * rpcb: transport instance.
 * len:  requested payload length.
 * send: on NDIS, selects the tx vs rx free-lbuf pool; unused on the
 *       Linux path.
 *
 * Returns the buffer or NULL on allocation failure.  On the Linux path
 * the in-use count is raised and, at RPCRX_WM_HI, rx flow control is
 * asserted on the bus.
 */
static rpc_buf_t *
bcm_rpc_tp_pktget(rpc_tp_info_t * rpcb, int len, bool send)
{
	rpc_buf_t* b;

#if defined(NDIS)
	struct lbuf *lb;

	if (len > LBDATASZ)
		return (NULL);

	/* pull from the tx or rx shared pool as requested */
	if (send)
		lb = shared_lb_get(rpcb->sh, &rpcb->sh->txfree);
	else
		lb = shared_lb_get(rpcb->sh, &rpcb->sh->rxfree);

	if (lb != NULL)
		lb->len = len;

	b = (rpc_buf_t*)lb;
#else
	struct sk_buff *skb;

#if defined(CTFPOOL)
	skb = PKTGET(rpcb->osh, len, FALSE);
#else
	if ((skb = dev_alloc_skb(len))) {
		skb_put(skb, len);
		skb->priority = 0;
	}
#endif /* defined(CTFPOOL) */

	b = (rpc_buf_t*)skb;

	if (b != NULL) {
#ifdef CTFMAP
		/* Clear the ctf buf flag to allow full dma map */
		PKTCLRCTF(rpcb->osh, skb);
		CTFMAPPTR(rpcb->osh, skb) = NULL;
#endif /* CTFMAP */

		/* bump accounting; assert rx flow control at the high watermark */
		RPC_TP_LOCK(rpcb);
		rpcb->bufalloc++;
		if (!rpcb->rxflowctrl && (rpcb->buf_cnt_inuse >= RPCRX_WM_HI)) {
			rpcb->rxflowctrl = TRUE;
			RPC_TP_ERR(("%s, rxflowctrl change to %d\n", __FUNCTION__, rpcb->rxflowctrl));
			dbus_flowctrl_rx(rpcb->bus, TRUE);
		}
		rpcb->buf_cnt_inuse++;
		if (rpcb->buf_cnt_inuse > (int)rpcb->buf_cnt_max)
			rpcb->buf_cnt_max = rpcb->buf_cnt_inuse;
		RPC_TP_UNLOCK(rpcb);
	} else {
		printf("%s: buf alloc failed buf_cnt_inuse %d rxflowctrl:%d\n",
			__FUNCTION__, rpcb->buf_cnt_inuse, rpcb->rxflowctrl);
		ASSERT(0);
	}
#endif /* NDIS */

	return b;
}
/*
 * Host-side transport attach: allocate the transport instance, attach
 * and bring up DBUS, and record the bus attributes (MTU, second
 * bulk-IN endpoint availability).
 *
 * NOTE(review): the `#endif` below closes a signature-selection
 * conditional whose `#if` is above this chunk; under linux the `shared`
 * parameter is presumably absent from the signature (hence the local
 * `void *shared = NULL`) -- confirm against the full file.
 *
 * Returns the transport handle, or NULL on failure (partial state is
 * torn down via bcm_rpc_tp_detach).
 */
rpc_tp_info_t *
bcm_rpc_tp_attach(osl_t * osh, shared_info_t *shared, void *bus)
#endif
{
	rpc_tp_info_t *rpcb;
	dbus_pub_t *dbus = NULL;
	dbus_attrib_t attrib;
	dbus_config_t config;
#if defined(linux)
	void *shared = NULL;
#endif /* linux */

	rpcb = (rpc_tp_info_t*)MALLOC(osh, sizeof(rpc_tp_info_t));
	if (rpcb == NULL) {
		printf("%s: rpc_tp_info_t malloc failed\n", __FUNCTION__);
		return NULL;
	}

	memset(rpcb, 0, sizeof(rpc_tp_info_t));

	bcm_rpc_tp_tx_agg_initstate(rpcb);

#if defined(NDIS)
	NdisAllocateSpinLock(&rpcb->lock);
#else
	spin_lock_init(&rpcb->lock);
#endif
	rpcb->osh = osh;

	/* FIX: Need to determine rx size and pass it here */
	dbus = (struct dbus_pub *)dbus_attach(osh, DBUS_RX_BUFFER_SIZE_RPC,
		BCM_RPC_TP_DBUS_NRXQ, BCM_RPC_TP_DBUS_NTXQ, rpcb /* info */,
		&rpc_dbus_cbs, &dbus_extdl, shared);
	if (dbus == NULL) {
		printf("%s: dbus_attach failed\n", __FUNCTION__);
		goto error;
	}

	rpcb->bus = (struct dbus_pub *)dbus;

	/* cache bus capabilities needed by the tx paths */
	dbus_get_attrib(dbus, &attrib);
	rpcb->has_2nd_bulk_in_ep = attrib.has_2nd_bulk_in_ep;
	rpcb->bus_mtu = attrib.mtu;
	rpcb->bus_txdepth = BCM_RPC_TP_DBUS_NTXQ;

	/* NOTE(review): only rxctl_deferrespok is set; the rest of `config`
	 * is uninitialized stack data passed to dbus_set_config -- confirm
	 * dbus only reads this field
	 */
	config.rxctl_deferrespok = TRUE;
	dbus_set_config(dbus, &config);

	/* host->dongle aggregation limits */
	rpcb->tp_tx_agg_sframes_limit = BCM_RPC_TP_HOST_AGG_MAX_SFRAME;
	rpcb->tp_tx_agg_bytes_max = BCM_RPC_TP_HOST_AGG_MAX_BYTE;

#if defined(NDIS)
	rpcb->sh = shared;
#endif /* NDIS */

	/* Bring DBUS up right away so RPC can start receiving */
	if (dbus_up(dbus)) {
		RPC_TP_ERR(("%s: dbus_up failed\n", __FUNCTION__));
		goto error;
	}

	return rpcb;

error:
	if (rpcb)
		bcm_rpc_tp_detach(rpcb);
	return NULL;
}
/*
 * DBUS receive callback: de-aggregate a host-side TP frame held in a
 * flat byte buffer.  Each subframe is a 4-byte little-endian length
 * word followed by an RPC frame; each frame is copied into a fresh
 * packet and passed up via bcm_rpc_tp_rx().
 *
 * handle: transport instance.
 * buf:    raw received bytes (borrowed; copied out of here).
 * len:    total byte count in buf.
 */
static void BCMFASTPATH
rpc_dbus_recv_buf(void *handle, uint8 *buf, int len)
{
	rpc_tp_info_t *rpcb = handle;
	void *pkt;
	uint32 rpc_len;
	uint frag;	/* subframe count on entry, to detect aggregation below */
	uint agglen;

	if ((rpcb == NULL) || (buf == NULL))
		return;
	frag = rpcb->tp_host_deagg_cnt_sf;
	agglen = len;

	/* TP pkt should have more than encapsulation header */
	if (len <= BCM_RPC_TP_ENCAP_LEN) {
		RPC_TP_ERR(("%s: wrong len %d\n", __FUNCTION__, len));
		goto error;
	}

	while (len > BCM_RPC_TP_ENCAP_LEN) {
		rpc_len = ltoh32_ua(buf);
		/* declared subframe length cannot exceed the remaining bytes */
		if (rpc_len > (uint32)(len - BCM_RPC_TP_ENCAP_LEN)) {
			rpcb->tp_host_deagg_cnt_badsflen++;
			return;
		}

		/* RPC_BUFFER_RX: allocate */
#if defined(BCM_RPC_ROC)
		if ((pkt = PKTGET(rpcb->osh, rpc_len, FALSE)) == NULL) {
#else
		if ((pkt = bcm_rpc_tp_pktget(rpcb, rpc_len, FALSE)) == NULL) {
#endif
			printf("%s: bcm_rpc_tp_pktget failed (len %d)\n", __FUNCTION__, len);
			goto error;
		}

		/* RPC_BUFFER_RX: BYTE_COPY from dbus buffer */
		bcopy(buf + BCM_RPC_TP_ENCAP_LEN, bcm_rpc_buf_data(rpcb, pkt), rpc_len);

		/* !! send up */
		bcm_rpc_tp_rx(rpcb, pkt);

		/* advance past this subframe (header + payload) */
		len -= (BCM_RPC_TP_ENCAP_LEN + rpc_len);
		buf += (BCM_RPC_TP_ENCAP_LEN + rpc_len);

		if (len > BCM_RPC_TP_ENCAP_LEN) {	/* more frag */
			rpcb->tp_host_deagg_cnt_sf++;
			RPC_TP_DEAGG(("%s: deagg %d(remaining %d) bytes\n",
				__FUNCTION__, rpc_len, len));
		} else {
			if (len != 0) {
				printf("%s: deagg, remaining len %d is not 0\n",
					__FUNCTION__, len);
			}
			rpcb->tp_host_deagg_cnt_pass++;
		}
	}

	if (frag < rpcb->tp_host_deagg_cnt_sf) {	/* aggregated frames */
		rpcb->tp_host_deagg_cnt_sf++;	/* last one was not counted */
		rpcb->tp_host_deagg_cnt_chain++;

		rpcb->tp_host_deagg_cnt_bytes += agglen;
	}

error:
	return;
}

/*
 * Post a receive for an RPC call-return from the dongle: allocate a
 * packet, park it in rx_rtn_pkt, and start a control (or second
 * bulk-IN) receive on the bus.  Completion is handled by
 * rpc_dbus_ctl_complete.
 *
 * Returns 0 on success, BCME_BUSY if a receive is already pending,
 * or a BCME_* error mapped from the DBUS status.
 */
int BCMFASTPATH
bcm_rpc_tp_recv_rtn(rpc_tp_info_t *rpcb)
{
	void *pkt;
	int status = 0;

	if (!rpcb)
		return BCME_BADARG;

	if ((pkt = bcm_rpc_tp_pktget(rpcb, PKTBUFSZ, FALSE)) == NULL) {
		return BCME_NORESOURCE;
	}

	/* only one call-return receive may be outstanding */
	RPC_TP_LOCK(rpcb);
	if (rpcb->rx_rtn_pkt != NULL) {
		RPC_TP_UNLOCK(rpcb);
		if (pkt != NULL)
			bcm_rpc_tp_pktfree(rpcb, pkt, FALSE);
		return BCME_BUSY;
	}
	rpcb->rx_rtn_pkt = pkt;
	RPC_TP_UNLOCK(rpcb);

#ifndef BCMUSBDEV_EP_FOR_RPCRETURN
	status = dbus_recv_ctl(rpcb->bus, bcm_rpc_buf_data(rpcb, rpcb->rx_rtn_pkt), PKTBUFSZ);
#else
	if (rpcb->has_2nd_bulk_in_ep) {
		status = dbus_recv_bulk(rpcb->bus, USBDEV_BULK_IN_EP2);
	} else {
		status = dbus_recv_ctl(rpcb->bus, bcm_rpc_buf_data(rpcb, rpcb->rx_rtn_pkt), PKTBUFSZ);
	}
#endif /* BCMUSBDEV_EP_FOR_RPCRETURN */

	if (status) {
		/* May have been cleared by complete routine */
		RPC_TP_LOCK(rpcb);
		pkt = rpcb->rx_rtn_pkt;
		rpcb->rx_rtn_pkt = NULL;
		RPC_TP_UNLOCK(rpcb);
		if (pkt != NULL)
			bcm_rpc_tp_pktfree(rpcb, pkt, FALSE);
		/* map DBUS error codes onto BCME_* for the caller */
		if (status == DBUS_ERR_RXFAIL)
			status = BCME_RXFAIL;
		else if (status == DBUS_ERR_NODEVICE)
			status = BCME_NODEVICE;
		else
			status = BCME_ERROR;
	}
	return status;
}