Example no. 1
/*
 * function to stop the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
} /* oce_clean_rq */
Example no. 2
/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return number of RQEs charged
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	int to_charge = 0;
	struct oce_dev *dev = rq->parent;
	to_charge = rq->cfg.q_len - rq->buf_avail;
	to_charge = min(to_charge, rq->rqb_free);
	atomic_add_32(&rq->rqb_free, -to_charge);
	(void) oce_rq_charge(rq, to_charge, B_FALSE);
	/* ok to do it here since Rx has not even started */
	oce_rq_post_buffer(rq, to_charge);
	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	return (ret);
} /* oce_start_rq */
Example no. 3
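/*
 * function to process the TX completion queue
 *
 * wq - pointer to WQ structure
 * rearm - B_TRUE to re-arm the CQ after processing
 *
 * return number of cqes processed
 */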
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq  = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQEs */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_compl */
Example no. 4
/*
 * Transmit entry point. Packets are hashed for load balancing over
 * the set of TX rings available to the driver.
 */
mblk_t *
oce_m_send(void *arg, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	mblk_t *nxt_pkt;
	mblk_t *rmp = NULL;
	struct oce_wq *wq;

	DEV_LOCK(dev);
	if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
		DEV_UNLOCK(dev);
		freemsg(mp);
		return (NULL);
	}
	DEV_UNLOCK(dev);
	/*
	 * Hash to pick a wq
	 */
	wq = oce_get_wq(dev, mp);

	while (mp != NULL) {
		/* save the next pointer; mp may be freed in case of a copy send */
		nxt_pkt = mp->b_next;
		mp->b_next = NULL;
		/* transmit on the wq selected above */
		rmp = oce_send_packet(wq, mp);
		if (rmp != NULL) {
			/* reschedule Tx */
			wq->resched = B_TRUE;
			oce_arm_cq(dev, wq->cq->cq_id, 0, B_TRUE);
			/* restore the chain */
			rmp->b_next = nxt_pkt;
			break;
		}
		mp  = nxt_pkt;
	}
	return (rmp);
} /* oce_send */
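The header comment on oce_m_send refers to a hashing policy over the TX rings, but oce_get_wq() itself is not part of this excerpt. A minimal sketch of the kind of selection step it implies, assuming a simple fold-and-modulo hash over the leading packet header bytes (the function name and parameters below are illustrative, not the driver's):

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative only: spread packets across nwqs TX rings by folding
 * the first few header bytes (e.g. the address fields) into a hash
 * and reducing it modulo the ring count.
 */
static unsigned int
pick_tx_ring(const uint8_t *hdr, size_t hdr_len, unsigned int nwqs)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < hdr_len && i < 32; i++)
		hash = (hash * 31) + hdr[i];

	return (hash % nwqs);
}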
Example no. 5
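/*
 * per-ring transmit entry point
 *
 * ring_handle - pointer to the WQ backing this ring
 * mp - chain of packets to transmit
 *
 * return the untransmitted remainder of the chain, or NULL
 */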
mblk_t *
oce_ring_tx(void *ring_handle, mblk_t *mp)
{
	struct oce_wq *wq = ring_handle;
	mblk_t *nxt_pkt;
	mblk_t *rmp = NULL;
	struct oce_dev *dev = wq->parent;

	if (dev->suspended) {
		freemsg(mp);
		return (NULL);
	}
	while (mp != NULL) {
		/* save the next pointer; mp may be freed in case of a copy send */
		nxt_pkt = mp->b_next;
		mp->b_next = NULL;
		/* transmit on this ring's wq */
		rmp = oce_send_packet(wq, mp);
		if (rmp != NULL) {
			/* restore the chain */
			rmp->b_next = nxt_pkt;
			break;
		}
		mp  = nxt_pkt;
	}

	if (wq->resched) {
		if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL, OCE_MODE_INTR)
		    == OCE_MODE_POLL) {
			oce_arm_cq(wq->parent, wq->cq->cq_id, 0, B_TRUE);
			wq->last_armed = ddi_get_lbolt();
		}
	}

	return (rmp);
}
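The atomic_cas_uint() above re-arms the CQ only in the caller whose compare-and-swap flips the queue from poll back to interrupt mode, so the doorbell is written once per transition even under concurrent senders. A stand-alone rendering of that pattern with C11 atomics (the queue type, mode values, and arm stub below are placeholders, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>

enum { MODE_POLL, MODE_INTR };

struct txq {
	_Atomic unsigned int qmode;
};

static void
arm_cq(struct txq *q)
{
	(void) q;	/* ring the completion-queue doorbell here */
}

/*
 * Re-arm only if this thread is the one that moves the queue out of
 * polling mode; losers of the CAS skip the already-done re-arm.
 */
static bool
rearm_if_polling(struct txq *q)
{
	unsigned int expected = MODE_POLL;

	if (atomic_compare_exchange_strong(&q->qmode, &expected, MODE_INTR)) {
		arm_cq(q);
		return (true);
	}
	return (false);
}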
Example no. 6
/*
 * function to process a Receive queue
 *
 * arg - pointer to the RQ to process
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	int32_t frag_cnt;
	uint32_t nbufs = 0;

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	(void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		frag_cnt = cqe->u0.s.num_fragments & 0x7;
		/* if insufficient buffers to charge then do copy */
		if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
		    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
			if (mp == NULL) {
				atomic_add_32(&rq->rqb_free, frag_cnt);
				mp = oce_rx_bcopy(dev, rq, cqe);
			}
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		} else {
			(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
		}
		RING_GET(rq->ring, frag_cnt);
		rq->buf_avail -= frag_cnt;
		nbufs += frag_cnt;

		oce_rq_post_buffer(rq, frag_cnt);
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
		/* limit the cqes processed per invocation */
		if (num_cqe > dev->rx_pkt_per_intr) {
			break;
		}
	} /* for all valid CQEs */

	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */
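oce_drain_rq_cq builds the received-packet chain with a pointer-to-pointer tail (mblk_tail = &mp->b_next), which appends each mblk in O(1) and leaves mblk_head correctly NULL when nothing was received. The same idiom on a generic singly linked list (the node type below is hypothetical):

#include <stddef.h>

struct node {
	struct node *next;
	int val;
};

/*
 * 'tail' always points at the slot that should receive the next node:
 * initially &head, afterwards the previous node's next field. This is
 * the same shape as the mblk_head/mblk_tail pair above.
 */
static struct node *
build_chain(struct node *items, size_t n)
{
	struct node *head = NULL;
	struct node **tail = &head;
	size_t i;

	for (i = 0; i < n; i++) {
		items[i].next = NULL;
		*tail = &items[i];
		tail = &items[i].next;
	}
	return (head);
}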