Example No. 1
/*
 * function to stop the RX by draining the RQ's completion queue and
 * discarding any packets still arriving
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
} /* oce_clean_rq */
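The dequeue loop above relies on the driver's ring helpers (RING_GET_CONSUMER_ITEM_VA to address the descriptor at the consumer index, RING_GET to advance it). As a rough illustration only, a stand-in for that consumer-side pattern could look like the sketch below; the struct and function names are invented for this sketch and the real oce macro definitions may differ.

#include <stdint.h>
#include <stddef.h>

/* hypothetical ring descriptor; the field names are assumptions for this sketch */
struct demo_ring {
	void		*base;		/* ring memory */
	uint32_t	item_size;	/* size of one descriptor */
	uint32_t	num_items;	/* ring depth */
	uint32_t	cidx;		/* consumer index */
};

/* return a pointer to the descriptor at the current consumer index */
static inline void *
demo_ring_consumer_item(struct demo_ring *ring)
{
	return ((uint8_t *)ring->base + ((size_t)ring->cidx * ring->item_size));
}

/* advance the consumer index by n slots, wrapping at the ring depth */
static inline void
demo_ring_get(struct demo_ring *ring, uint32_t n)
{
	ring->cidx = (ring->cidx + n) % ring->num_items;
}
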
Example No. 2
/*
 * function to process the TX completion queue and reclaim completed WQEs
 *
 * wq - pointer to the WQ structure
 * rearm - whether to re-arm the CQ after processing
 *
 * return number of CQEs processed
 */
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq  = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQE */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_compl */
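oce_process_tx_compl() follows the usual completion-queue drain pattern: test the valid bit, handle the entry, invalidate it, advance the consumer index, and finally credit the CQ once with the number of entries consumed. A minimal self-contained sketch of that pattern, with hypothetical types standing in for the driver's CQE and ring structures, might be:

#include <stdint.h>

/* hypothetical completion entry; a real CQE also needs DMA sync and byte swapping */
struct demo_cqe {
	uint32_t	valid;		/* set by hardware, cleared by the driver */
	uint32_t	status;		/* non-zero indicates a transmit error */
};

/*
 * Drain every valid entry, clear it, and return the count so the caller can
 * credit the CQ doorbell once, the way oce_process_tx_compl() calls
 * oce_arm_cq() with num_cqe.
 */
static uint16_t
demo_drain_cq(struct demo_cqe *cq, uint32_t depth, uint32_t *cidx,
    uint32_t *errors)
{
	uint16_t num_cqe = 0;

	while (cq[*cidx].valid) {
		if (cq[*cidx].status != 0)
			(*errors)++;		/* update error statistics */
		cq[*cidx].valid = 0;		/* invalidate the entry */
		*cidx = (*cidx + 1) % depth;	/* advance the consumer index */
		num_cqe++;
	}
	return (num_cqe);
}
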
Example No. 3
/*
 * function to free the buffers of a dropped receive packet
 *
 * rq - pointer to the RQ structure
 * cqe - completion entry describing the packet to drop
 *
 * return none
 */
static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	int frag_cnt;
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t  **shadow_rq;

	shadow_rq = rq->shadow_ring;
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		rqbd = shadow_rq[rq->ring->cidx];
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
}
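Here shadow_rq mirrors the hardware RQ slot for slot with software buffer descriptors, so a packet's fragments can be freed by walking the shadow array in lockstep with the ring's consumer index. A generic sketch of the same idea, with names invented for illustration:

#include <stdint.h>

/* hypothetical software buffer descriptor */
struct demo_buf {
	void		*vaddr;		/* kernel virtual address of the buffer */
	uint64_t	paddr;		/* DMA address that was posted to hardware */
};

/*
 * Free frag_cnt buffers by walking a shadow array in lockstep with the
 * hardware ring's consumer index, the way oce_rx_drop_pkt() walks
 * rq->shadow_ring using rq->ring->cidx.
 */
static void
demo_drop_frags(struct demo_buf **shadow, uint32_t depth, uint32_t *cidx,
    int frag_cnt, void (*free_buf)(struct demo_buf *))
{
	int i;

	for (i = 0; i < frag_cnt; i++) {
		free_buf(shadow[*cidx]);	/* return the buffer to its pool */
		*cidx = (*cidx + 1) % depth;	/* step past this fragment */
	}
}
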
Example No. 4
/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t **shadow_rq;

	shadow_rq = rq->shadow_ring;
	/* Free the posted buffer since RQ is destroyed already */
	while ((int32_t)rq->buf_avail > 0) {
		rqbd = shadow_rq[rq->ring->cidx];
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
		rq->buf_avail--;
	}
}
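Note the (int32_t) cast in the loop condition: buf_avail is presumably an unsigned counter (it is updated with atomic_add_32() elsewhere), and viewing it as signed stops the loop if the count was ever over-decremented, instead of wrapping and spinning for billions of iterations. A tiny standalone illustration, assuming a two's-complement conversion:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t buf_avail = (uint32_t)-2;	/* simulate an over-drained counter */

	/* an unsigned `buf_avail > 0` test would spin roughly 4 billion times here */
	if ((int32_t)buf_avail > 0)
		printf("would drain %u buffers\n", buf_avail);
	else
		printf("counter underflowed; nothing to drain\n");
	return (0);
}
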
Example No. 5
/*
 * function to process a Receive queue
 *
 * arg - pointer to the RQ whose completion queue is to be drained
 *
 * return number of CQEs processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	int32_t frag_cnt;
	uint32_t nbufs = 0;

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	(void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		frag_cnt = cqe->u0.s.num_fragments & 0x7;
		/* if insufficient buffers to charge then do copy */
		if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
		    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
			if (mp == NULL) {
				atomic_add_32(&rq->rqb_free, frag_cnt);
				mp = oce_rx_bcopy(dev, rq, cqe);
			}
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		} else {
			(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
		}
		RING_GET(rq->ring, frag_cnt);
		rq->buf_avail -= frag_cnt;
		nbufs += frag_cnt;

		oce_rq_post_buffer(rq, frag_cnt);
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
		/* bound the number of CQEs processed in a single pass */
		if (num_cqe > dev->rx_pkt_per_intr) {
			break;
		}
	} /* for all valid CQEs */

	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */
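The copy-versus-loan decision above depends on oce_atomic_reserve(), which has to claim frag_cnt buffer credits from rq->rqb_free atomically or report failure so the path can fall back to oce_rx_bcopy(). One common way to build such a reserve primitive is sketched below; this is an assumption for illustration and uses C11 atomics rather than the Solaris atomic_*() routines the driver itself uses.

#include <stdint.h>
#include <stdatomic.h>

/*
 * Atomically take `count` credits from *avail; return -1 and leave the
 * counter untouched if fewer than `count` credits are free.
 */
static int
demo_atomic_reserve(_Atomic uint32_t *avail, uint32_t count)
{
	uint32_t cur = atomic_load(avail);

	do {
		if (cur < count)
			return (-1);	/* not enough buffers; caller falls back to bcopy */
	} while (!atomic_compare_exchange_weak(avail, &cur, cur - count));

	return (0);
}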