/* * function to stop the RX * * rq - pointer to RQ structure * * return none */ void oce_clean_rq(struct oce_rq *rq) { uint16_t num_cqe = 0; struct oce_cq *cq; struct oce_dev *dev; struct oce_nic_rx_cqe *cqe; int32_t ti = 0; dev = rq->parent; cq = rq->cq; cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); /* dequeue till you reach an invalid cqe */ for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) { while (RQ_CQE_VALID(cqe)) { DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe)); oce_rx_drop_pkt(rq, cqe); atomic_add_32(&rq->buf_avail, -(cqe->u0.s.num_fragments & 0x7)); oce_arm_cq(dev, cq->cq_id, 1, B_TRUE); RQ_CQE_INVALIDATE(cqe); RING_GET(cq->ring, 1); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); num_cqe++; } OCE_MSDELAY(1); } } /* oce_clean_rq */
/*
 * oce_rx_pending: check whether the stack still holds RX buffers.
 *
 * Polls rq->pending in 10 ms steps for at most `timeout` iterations;
 * as soon as the count drops to zero (or below) it is clamped to zero
 * and polling stops.
 *
 * Returns the final value of rq->pending (0 when fully drained).
 */
int
oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
{
	int i;

	_NOTE(ARGUNUSED(dev));

	for (i = 0; i < timeout; i++) {
		if (rq->pending <= 0) {
			/* nothing outstanding; normalize and stop */
			rq->pending = 0;
			break;
		}
		OCE_MSDELAY(10);
	}

	return (rq->pending);
}
/* * function to stop the WQ * * wq - pointer to WQ * * return none */ void oce_clean_wq(struct oce_wq *wq) { oce_wqe_desc_t *wqed; int ti; /* Wait for already posted Tx to complete */ for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) { (void) oce_process_tx_compl(wq, B_FALSE); OCE_MSDELAY(1); } /* Free the remaining descriptors */ while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) { atomic_add_32(&wq->wq_free, wqed->wqe_cnt); oce_free_wqed(wq, wqed); } oce_drain_eq(wq->cq->eq); } /* oce_stop_wq */