/*
 * Periodic watchdog for the RPC transport: flush any pending dongle
 * aggregation and adapt the lazy-aggregation mode based on how many
 * subframes were aggregated since the previous watchdog tick.
 *
 * NOTE(review): 'prev_sf_cnt' is function-static, so the delta is only
 * meaningful for a single rpc_tp_info_t instance -- confirm the driver
 * never runs multiple transport instances concurrently.
 */
void
bcm_rpc_tp_watchdog(rpc_tp_info_t *rpcb)
{
	static uint prev_sf_cnt = 0;
	uint sf_delta;

	/* (1) close agg periodically to avoid stale aggregation */
	bcm_rpc_tp_dngl_agg_release(rpcb);

	/* subframes aggregated since the last tick (unsigned math, wrap-safe) */
	sf_delta = rpcb->tp_dngl_agg_cnt_sf - prev_sf_cnt;
	prev_sf_cnt = rpcb->tp_dngl_agg_cnt_sf;

	RPC_TP_DBG(("agg delta %d tp flowcontrol queue pending (qlen %d subframe %d)\n",
		sf_delta, pktq_len(rpcb->tx_flowctlq), rpcb->tx_q_flowctl_segcnt));

	/* Hysteresis on the lazy-aggregation flag: leave lazy mode when the
	 * aggregation rate falls below the low watermark, enter it when the
	 * rate climbs above the high watermark.
	 */
	if (rpcb->tp_dngl_agg_lazy)
		rpcb->tp_dngl_agg_lazy = (sf_delta < BCM_RPC_TP_AGG_LAZY_WM_LO) ? 0 : 1;
	else
		rpcb->tp_dngl_agg_lazy = (sf_delta > BCM_RPC_TP_AGG_LAZY_WM_HI) ? 1 : 0;
}
/*
 * Send one rpc buffer (either a single frame or the head of a completed
 * tx aggregation chain) down the dbus.
 *
 * Flow control: the outstanding-transmit count is incremented under the
 * lock BEFORE submission; if it reaches the bus depth (or the explicit
 * override flag is set) the caller blocks in RPC_OSL_WAIT until either
 * the pending count drains or 'timeout' ms elapse.  On timeout the
 * optimistic increment is rolled back and the wait's error is returned.
 *
 * On BCMUSB/USBAP builds the transfer length is padded so the total is
 * never a multiple of BCM_RPC_TP_HOST_TOTALLEN_ZLP (512) bytes even
 * after dbus uint32 roundup -- presumably to avoid the USB short/ZLP
 * packet requirement implied by the macro names; confirm against the
 * dongle-side unpacking.
 *
 * Returns 0 on success; otherwise the RPC_OSL_WAIT or dbus_send_pkt
 * error code.
 */
static int BCMFASTPATH
bcm_rpc_tp_buf_send_internal(rpc_tp_info_t * rpcb, rpc_buf_t *b)
{
	int err;
	bool tx_flow_control;
	int timeout = RPC_BUS_SEND_WAIT_TIMEOUT_MSEC;
	bool timedout = FALSE;
	uint pktlen;

	UNUSED_PARAMETER(pktlen);	/* only referenced under BCMUSB/USBAP */

	RPC_TP_LOCK(rpcb);

	/* histogram of pending-depth at submit time, for diagnostics */
	rpcb->rpctp_dbus_hist[rpcb->bus_txpending]++;

	/* Increment before sending to avoid race condition */
	rpcb->bus_txpending++;
	tx_flow_control = (rpcb->bus_txpending >= rpcb->bus_txdepth);

	if (rpcb->tx_flowctl != tx_flow_control) {
		rpcb->tx_flowctl = tx_flow_control;
		RPC_TP_DBG(("%s, tx_flowctl change to %d pending %d\n", __FUNCTION__,
			rpcb->tx_flowctl, rpcb->bus_txpending));
	}
	rpcb->tx_flowctl_cnt += rpcb->tx_flowctl ? 1 : 0;

	RPC_TP_UNLOCK(rpcb);

	if (rpcb->tx_flowctl_override) {
		timeout = RPC_BUS_SEND_WAIT_EXT_TIMEOUT_MSEC;
	}

	if (rpcb->tx_flowctl || rpcb->tx_flowctl_override) {
		err = RPC_OSL_WAIT(rpcb->rpc_osh, timeout, &timedout);

		if (timedout) {
			/* BUGFIX: log the timeout actually used.  The override
			 * path waits RPC_BUS_SEND_WAIT_EXT_TIMEOUT_MSEC, but the
			 * message previously hardcoded the base timeout value.
			 */
			printf("%s: RPC_OSL_WAIT error %d timeout %d(ms)\n", __FUNCTION__,
				err, timeout);
			RPC_TP_LOCK(rpcb);
			rpcb->txerr_cnt++;
			rpcb->bus_txpending--;	/* roll back the optimistic increment */
			RPC_TP_UNLOCK(rpcb);
			return err;
		}
	}

#if defined(BCMUSB) || defined(USBAP)
	if (rpcb->tp_tx_agg_bytes != 0) {
		/* aggregated: 'b' must be the head and a tail subframe exists */
		ASSERT(rpcb->tp_tx_agg_p == b);
		ASSERT(rpcb->tp_tx_agg_ptail != NULL);

		/* Make sure pktlen is not multiple of 512 bytes even after
		 * possible dbus padding: grow the tail subframe's TP length
		 * and buffer length by the ZLP pad amount.
		 */
		if ((ROUNDUP(rpcb->tp_tx_agg_bytes, sizeof(uint32)) %
			BCM_RPC_TP_HOST_TOTALLEN_ZLP) == 0) {
			uint32 *tp_lenp = (uint32 *)bcm_rpc_buf_data(rpcb, rpcb->tp_tx_agg_ptail);
			uint32 tp_len = ltoh32(*tp_lenp);
			pktlen = bcm_rpc_buf_len_get(rpcb, rpcb->tp_tx_agg_ptail);
			ASSERT(tp_len + BCM_RPC_TP_ENCAP_LEN == pktlen);

			RPC_TP_DBG(("%s, agg pkt is multiple of 512 bytes\n", __FUNCTION__));

			tp_len += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			pktlen += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			*tp_lenp = htol32(tp_len);
			bcm_rpc_buf_len_set(rpcb, rpcb->tp_tx_agg_ptail, pktlen);
		}
	} else { /* not aggregated */
		ASSERT(b != NULL);
		pktlen = bcm_rpc_buf_len_get(rpcb, b);

		/* Make sure pktlen is not multiple of 512 bytes even after
		 * possible dbus padding
		 */
		if ((pktlen != 0) &&
			((ROUNDUP(pktlen, sizeof(uint32)) % BCM_RPC_TP_HOST_TOTALLEN_ZLP) == 0)) {
			uint32 *tp_lenp = (uint32 *)bcm_rpc_buf_data(rpcb, b);
			uint32 tp_len = ltoh32(*tp_lenp);
			ASSERT(tp_len + BCM_RPC_TP_ENCAP_LEN == pktlen);

			RPC_TP_DBG(("%s, nonagg pkt is multiple of 512 bytes\n", __FUNCTION__));

			tp_len += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			pktlen += BCM_RPC_TP_HOST_TOTALLEN_ZLP_PAD;
			*tp_lenp = htol32(tp_len);
			bcm_rpc_buf_len_set(rpcb, b, pktlen);
		}
	}
#endif /* defined(BCMUSB) || defined(USBAP) */

#ifdef EHCI_FASTPATH_TX
	/* With optimization, submit code is lockless, use RPC_TP_LOCK */
	RPC_TP_LOCK(rpcb);
	err = dbus_send_pkt(rpcb->bus, b, b);
#else
	err = dbus_send_pkt(rpcb->bus, b, b);
	RPC_TP_LOCK(rpcb);
#endif /* EHCI_FASTPATH_TX */

	if (err != 0) {
		printf("%s: dbus_send_pkt failed\n", __FUNCTION__);
		rpcb->txerr_cnt++;
		rpcb->bus_txpending--;	/* submission failed: undo the increment */
	} else {
		rpcb->tx_cnt++;
	}

	RPC_TP_UNLOCK(rpcb);

	return err;
}