Example #1
void
bcm_rpc_tp_agg_set(rpc_tp_info_t *rpcb, uint32 reason, bool set)
{
	static int i = 0;

	if (set) {
		RPC_TP_AGG(("%s: agg start\n", __FUNCTION__));

		mboolset(rpcb->tp_dngl_aggregation, reason);

	} else if (rpcb->tp_dngl_aggregation) {

		RPC_TP_AGG(("%s: agg end\n", __FUNCTION__));

		if (i > 0) {
			i--;
			return;
		} else {
			i = rpcb->tp_dngl_agg_lazy;
		}

		mboolclr(rpcb->tp_dngl_aggregation, reason);
		if (!rpcb->tp_dngl_aggregation)
			bcm_rpc_tp_dngl_agg_release(rpcb);
	}
}
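A note on the static counter above: the release appears to be deliberately lazy. Each time the reason bit is actually cleared, i is reloaded from tp_dngl_agg_lazy, and that many subsequent clear requests return early before the aggregation is touched again. The sketch below is a standalone illustration of the same gating pattern; the names and framing are assumptions, not driver code.

/* Standalone illustration of the lazy gate used above (not driver code):
 * after the action fires, the next 'lazy' requests are swallowed.
 */
static int
lazy_gate(int *counter, int lazy)
{
	if (*counter > 0) {
		(*counter)--;
		return 0;	/* request swallowed, no action */
	}
	*counter = lazy;	/* reload the gate */
	return 1;		/* caller may perform the action */
}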
Example #2
static int
bcm_rpc_tp_tx_agg_release(rpc_tp_info_t * rpcb)
{
	rpc_buf_t *b;
	int err;

	/* no aggregation formed */
	if (rpcb->tp_tx_agg_p == NULL)
		return 0;

	RPC_TP_AGG(("%s: send %d, sframe %d\n", __FUNCTION__,
		rpcb->tp_tx_agg_bytes, rpcb->tp_tx_agg_sframes));

	b = rpcb->tp_tx_agg_p;
	rpcb->tp_tx_agg_cnt_chain++;
	rpcb->tp_tx_agg_cnt_sf += rpcb->tp_tx_agg_sframes;
	rpcb->tp_tx_agg_cnt_bytes += rpcb->tp_tx_agg_bytes;

	if (rpcb->tp_tx_agg_sframes == 1)
		rpcb->tp_tx_agg_cnt_noagg++;

	err = bcm_rpc_tp_buf_send_internal(rpcb, b);
	bcm_rpc_tp_tx_agg_initstate(rpcb);
	return err;
}
Example #3
/*
 * tp_tx_agg_p points to the header lbuf, tp_tx_agg_ptail points to the tail lbuf
 *
 * The TP agg format is typically as below:
 *   | TP header(len) | subframe1 rpc_header | subframe1 data |
 *     | TP header(len) | subframe2 rpc_header | subframe2 data |
 *          ...
 *           | TP header(len) | subframeN rpc_header | subframeN data |
 * no padding
 */
static void
bcm_rpc_tp_tx_agg_append(rpc_tp_info_t * rpcb, rpc_buf_t *b)
{
	uint tp_len;

	tp_len = pkttotlen(rpcb->osh, b);

	if (rpcb->tp_tx_agg_p == NULL) {
		/* toc, set tail to last fragment */
		if (PKTNEXT(rpcb->osh, b)) {
			rpcb->tp_tx_agg_p = b;
			rpcb->tp_tx_agg_ptail = pktlast(rpcb->osh, b);
		} else
			rpcb->tp_tx_agg_p = rpcb->tp_tx_agg_ptail = b;
	} else {
		/* chain the pkts at the end of current one */
		ASSERT(rpcb->tp_tx_agg_ptail != NULL);
		PKTSETNEXT(rpcb->osh, rpcb->tp_tx_agg_ptail, b);
		/* toc, set tail to last fragment */
		if (PKTNEXT(rpcb->osh, b)) {
			rpcb->tp_tx_agg_ptail = pktlast(rpcb->osh, b);
		} else
			rpcb->tp_tx_agg_ptail = b;

	}

	rpcb->tp_tx_agg_sframes++;
	rpcb->tp_tx_agg_bytes += tp_len;

	RPC_TP_AGG(("%s: tp_len %d tot %d, sframe %d\n", __FUNCTION__, tp_len,
	                rpcb->tp_tx_agg_bytes, rpcb->tp_tx_agg_sframes));
}
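The comment block above is the key to the aggregation layout: every subframe carries its own TP length header and the subframes are simply chained back to back with no padding. The sketch below only illustrates walking such an aggregate once it has been flattened into contiguous memory; the 4-byte little-endian length field and the helper name are assumptions, and the real driver operates on chained lbufs rather than a flat buffer.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch: walk | TP header(len) | rpc_header | data | records
 * laid back to back, assuming a flat buffer and a 4-byte LE length field.
 */
static void
tp_agg_walk(const uint8_t *buf, size_t buflen)
{
	size_t off = 0;

	while (off + 4 <= buflen) {
		uint32_t sf_len = (uint32_t)buf[off] |
		                  ((uint32_t)buf[off + 1] << 8) |
		                  ((uint32_t)buf[off + 2] << 16) |
		                  ((uint32_t)buf[off + 3] << 24);

		if (sf_len == 0 || off + 4 + sf_len > buflen)
			break;	/* malformed or truncated subframe */

		/* buf + off + 4 is the start of this subframe (rpc_header + data) */
		off += 4 + sf_len;
	}
}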
Example #4
static int
bcm_rpc_tp_tx_agg(rpc_tp_info_t *rpcb, rpc_buf_t *b)
{
	uint totlen;
	uint pktlen;
	int err = 0;

	ASSERT(rpcb->tp_tx_aggregation);

	pktlen = pkttotlen(rpcb->osh, b);
	totlen = pktlen + rpcb->tp_tx_agg_bytes;

	if ((totlen > rpcb->tp_tx_agg_bytes_max) ||
		(rpcb->tp_tx_agg_sframes + 1 > rpcb->tp_tx_agg_sframes_limit)) {

		RPC_TP_AGG(("%s: terminate TP agg for txbyte %d or txframe %d\n", __FUNCTION__,
			rpcb->tp_tx_agg_bytes_max, rpcb->tp_tx_agg_sframes_limit));

		/* release current agg, continue with new agg */
		err = bcm_rpc_tp_tx_agg_release(rpcb);
	}

	bcm_rpc_tp_tx_agg_append(rpcb, b);

	/* if the new frag by itself already reaches the agg byte limit, release it right away */
	if (pktlen >= rpcb->tp_tx_agg_bytes_max) {
		int new_err;
		new_err = bcm_rpc_tp_tx_agg_release(rpcb);
		if (!err)
			err = new_err;
	}

	return err;
}
Example #5
/* TP aggregation: set, init, agg, append, close, flush */
void
bcm_rpc_tp_agg_set(rpc_tp_info_t *rpcb, uint32 reason, bool set)
{
	if (set) {
		RPC_TP_AGG(("%s: agg start 0x%x\n", __FUNCTION__, reason));

		mboolset(rpcb->tp_tx_aggregation, reason);

	} else if (rpcb->tp_tx_aggregation) {
		RPC_TP_AGG(("%s: agg end 0x%x\n", __FUNCTION__, reason));

		mboolclr(rpcb->tp_tx_aggregation, reason);
		if (!rpcb->tp_tx_aggregation)
			bcm_rpc_tp_tx_agg_release(rpcb);
	}
}
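bcm_rpc_tp_agg_set() treats tp_tx_aggregation as a bitmask of reasons: aggregation stays enabled while any reason bit is set, and the pending aggregate is only released once the last bit clears. For reference, the mbool helpers used here are simple bit macros; the definitions below are assumed for illustration and may differ slightly from the actual shared headers.

typedef unsigned int mbool;				/* multi-bool: one bit per reason */

#define mboolset(mb, bit)	((mb) |= (bit))		/* enable one reason */
#define mboolclr(mb, bit)	((mb) &= ~(bit))	/* clear one reason */
#define mboolisset(mb, bit)	(((mb) & (bit)) != 0)	/* is this reason set? */

With that in mind, the if (!rpcb->tp_tx_aggregation) test in the example is simply asking whether every reason has now been cleared.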
Example #6
static int
bcm_rpc_tp_buf_send_internal(rpc_tp_info_t * rpcb, rpc_buf_t *b, uint32 tx_ep_index)
{
	int err;
	struct lbuf *lb = (struct lbuf *)b;
	hndrte_dev_t *chained = rpcb->ctx->chained;
	uint pktlen;

	ASSERT(chained);

	ASSERT(b != NULL);
	pktlen = bcm_rpc_buf_totlen_get(rpcb, b);

	if (pktlen == BCM_RPC_TP_DNGL_TOTLEN_BAD) {
		RPC_TP_AGG(("%s, pkt is %d bytes, padding %d bytes\n", __FUNCTION__,
			BCM_RPC_TP_DNGL_TOTLEN_BAD, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD));

		bcm_rpc_tp_buf_pad(rpcb, b, BCM_RPC_TP_DNGL_TOTLEN_BAD_PAD);

	} else if (pktlen % BCM_RPC_TP_DNGL_BULKEP_MPS == 0) {
		RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
			__FUNCTION__,
			BCM_RPC_TP_DNGL_BULKEP_MPS, BCM_RPC_TP_DNGL_ZLP_PAD));

		bcm_rpc_tp_buf_pad(rpcb, b, BCM_RPC_TP_DNGL_ZLP_PAD);
	}

	lb = PKTTONATIVE(rpcb->osh, b);
	/* send through data endpoint */
	if ((err = chained->funcs->xmit(rpcb->ctx, chained, lb)) != 0) {
		RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__, lb));
		rpcb->txerr_cnt++;
		lb_free(lb);
	} else {
		rpcb->tx_cnt++;

		/* give pkt ownership to usb driver, decrement the counter */
		rpcb->buf_cnt_inuse -= pktsegcnt(rpcb->osh, b);
	}

	return err;
}
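The two padding branches above deal with USB bulk transfer quirks: a packet whose total length equals a known problematic value (BCM_RPC_TP_DNGL_TOTLEN_BAD) is padded past it, and a packet that is an exact multiple of the bulk endpoint's max packet size is padded so the transfer ends with a short packet instead of requiring a zero-length packet (ZLP). Below is a standalone sketch of the second check, with an invented helper name and example numbers assuming a 512-byte MPS.

/* Hypothetical helper: pad bytes needed so a bulk transfer does not end
 * exactly on an MPS boundary (which would otherwise require a ZLP).
 * E.g. tp_zlp_pad_needed(1024, 512, 1) == 1, tp_zlp_pad_needed(1000, 512, 1) == 0.
 */
static unsigned int
tp_zlp_pad_needed(unsigned int pktlen, unsigned int mps, unsigned int zlp_pad)
{
	return (pktlen % mps == 0) ? zlp_pad : 0;
}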
Example #7
/* This is called by dngl_txstop as txflowcontrol (stopping tx from the dongle to the host) in bcmwl,
 * but it is called rxflowcontrol in the wl driver (pausing rx of the wl driver). This is for the low driver only.
 */
void
bcm_rpc_tp_txflowctl(rpc_tp_info_t *rpc_th, bool state, int prio)
{
	rpc_buf_t *b;

	ASSERT(rpc_th);

	if (rpc_th->tx_flowctl == state)
		return;

	RPC_TP_AGG(("tp_txflowctl %d\n", state));

	rpc_th->tx_flowctl = state;
	rpc_th->tx_flowctl_cnt++;
	rpc_th->tx_flowcontrolled = state;

	/* when coming out of flow control, send all queued packets in a loop,
	 * but check tx_flowctl on every iteration and stop if we get flow-controlled again
	 */
	while (!rpc_th->tx_flowctl && !pktq_empty(rpc_th->tx_flowctlq)) {

		b = pktdeq(rpc_th->tx_flowctlq);
		if (b == NULL) break;

		rpc_th->tx_q_flowctl_segcnt -= pktsegcnt(rpc_th->osh, b);

		bcm_rpc_tp_buf_send_internal(rpc_th, b, USBDEV_BULK_IN_EP1);
	}

	/* bcm_rpc_tp_agg_set(rpc_th, BCM_RPC_TP_DNGL_AGG_FLOWCTL, state); */

	/* if the low watermark is reached, release the wl driver
	 *   TODO: count more (average 3?) if agg is ON
	 */
	if (rpc_th->tx_q_flowctl_segcnt < rpc_th->tx_q_flowctl_lowm) {
		RPC_TP_AGG(("bcm_rpc_tp_txflowctl, wm hit low!\n"));
		rpc_th->txflowctl_cb(rpc_th->txflowctl_ctx, OFF);
	}

	return;
}
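The drain loop above is one half of a watermark flow-control scheme: while the endpoint is flow-controlled, packets accumulate in tx_flowctlq, and once the queued segment count drops below tx_q_flowctl_lowm the wl driver is released through txflowctl_cb. The sketch below is a generic illustration of that high/low watermark pattern; the struct and names are invented, not taken from the driver.

/* Generic high/low watermark illustration (not driver code). */
struct wm_flowctl {
	int pending;	/* segments currently queued */
	int highm;	/* pause the producer at or above this */
	int lowm;	/* resume the producer below this */
	int stopped;	/* producer currently paused? */
};

static void
wm_update(struct wm_flowctl *fc)
{
	if (!fc->stopped && fc->pending >= fc->highm)
		fc->stopped = 1;	/* ask the producer to pause */
	else if (fc->stopped && fc->pending < fc->lowm)
		fc->stopped = 0;	/* ask the producer to resume */
}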
Example #8
void
bcm_rpc_tp_watchdog(rpc_tp_info_t *rpcb)
{
	static int old = 0;

	/* close agg periodically to avoid stale aggregation (including rpc_agg changes) */
	bcm_rpc_tp_tx_agg_release(rpcb);

	RPC_TP_AGG(("agg delta %d\n", (rpcb->tp_tx_agg_cnt_sf - old)));

	old = rpcb->tp_tx_agg_cnt_sf;
	BCM_REFERENCE(old);
}
Example #9
int
bcm_rpc_tp_send_callreturn(rpc_tp_info_t * rpc_th, rpc_buf_t *b)
{
	int err, pktlen;
	struct lbuf *lb;
	hndrte_dev_t *chained = rpc_th->ctx->chained;

	ASSERT(chained);

	/* Add the TP encapsulation */
	bcm_rpc_tp_tx_encap(rpc_th, b);

	/* Pad if pkt size is a multiple of MPS */
	pktlen = bcm_rpc_buf_totlen_get(rpc_th, b);
	if (pktlen % BCM_RPC_TP_DNGL_CTRLEP_MPS == 0) {
		RPC_TP_AGG(("%s, tp pkt is multiple of %d bytes, padding %d bytes\n",
			__FUNCTION__, BCM_RPC_TP_DNGL_CTRLEP_MPS, BCM_RPC_TP_DNGL_ZLP_PAD));

		bcm_rpc_tp_buf_pad(rpc_th, b, BCM_RPC_TP_DNGL_ZLP_PAD);
	}

	lb = PKTTONATIVE(rpc_th->osh, b);

	if (rpc_th->has_2nd_bulk_in_ep) {
		err = chained->funcs->xmit2(rpc_th->ctx, chained, lb, USBDEV_BULK_IN_EP2);
	} else {
		err = chained->funcs->xmit_ctl(rpc_th->ctx, chained, lb);
	}
	/* send through control endpoint */
	if (err != 0) {
		RPC_TP_ERR(("%s: xmit failed; free pkt %p\n", __FUNCTION__, lb));
		rpc_th->txerr_cnt++;
		lb_free(lb);
	} else {
		rpc_th->tx_cnt++;

		/* give pkt ownership to usb driver, decrement the counter */
		rpc_th->buf_cnt_inuse -= pktsegcnt(rpc_th->osh, b);
	}

	return err;
}
Example #10
static int
bcm_rpc_tp_dngl_agg_release(rpc_tp_info_t * rpcb)
{
	int err;
	rpc_buf_t *b;

	if (rpcb->tp_dngl_agg_p == NULL) {	/* no aggregation formed */
		return 0;
	}

	RPC_TP_AGG(("%s, send %d, sframe %d\n", __FUNCTION__,
		rpcb->tp_dngl_agg_bytes, rpcb->tp_dngl_agg_sframes));

	b = rpcb->tp_dngl_agg_p;
	rpcb->tp_dngl_agg_cnt_chain++;
	rpcb->tp_dngl_agg_cnt_sf += rpcb->tp_dngl_agg_sframes;
	rpcb->tp_dngl_agg_cnt_bytes += rpcb->tp_dngl_agg_bytes;
	if (rpcb->tp_dngl_agg_sframes == 1)
		rpcb->tp_dngl_agg_cnt_noagg++;

	bcm_rpc_tp_dngl_agg_initstate(rpcb);

	rpcb->tp_dngl_agg_txpending++;

	if (rpcb->tx_flowctl) {
		bcm_rpc_tp_buf_send_enq(rpcb, b);
		err = 0;
	} else {
		err = bcm_rpc_tp_buf_send_internal(rpcb, b, USBDEV_BULK_IN_EP1);
	}

	if (err != 0) {
		RPC_TP_ERR(("bcm_rpc_tp_dngl_agg_release: send err!!!\n"));
		/* ASSERT(0) */
	}

	return err;
}
Exemplo n.º 11
0
/*
 *  tp_dngl_agg_p points to the header lbuf, tp_dngl_agg_ptail points to the tail lbuf
 *
 * The TP agg format is typically as below:
 *   | TP header(len) | subframe1 rpc_header | subframe1 data |
 *     | TP header(len) | subframe2 rpc_header | subframe2 data |
 *          ...
 *           | TP header(len) | subframeN rpc_header | subframeN data |
 * no padding
 */
static void
bcm_rpc_tp_dngl_agg_append(rpc_tp_info_t * rpcb, rpc_buf_t *b)
{
	uint tp_len = bcm_rpc_buf_len_get(rpcb, b);

	if (rpcb->tp_dngl_agg_p == NULL) {

		rpcb->tp_dngl_agg_p = rpcb->tp_dngl_agg_ptail = b;

	} else {
		/* chain the pkts at the end of current one */
		ASSERT(rpcb->tp_dngl_agg_ptail != NULL);

		PKTSETNEXT(rpcb->osh, rpcb->tp_dngl_agg_ptail, b);
		rpcb->tp_dngl_agg_ptail = b;
	}

	rpcb->tp_dngl_agg_sframes++;
	rpcb->tp_dngl_agg_bytes += tp_len;

	RPC_TP_AGG(("%s, tp_len %d tot %d, sframe %d\n", __FUNCTION__, tp_len,
		rpcb->tp_dngl_agg_bytes, rpcb->tp_dngl_agg_sframes));
}