/*
 * Hashing policy for load balancing over the set of TX rings
 * available to the driver.
 */

/*
 * GLDv3 transmit entry point (mc_tx(9E)): send a chain of message
 * blocks linked through b_next.
 *
 * Returns NULL when the entire chain was consumed (transmitted, or
 * dropped because the device is suspended / MAC not started), or the
 * head of the untransmitted remainder when the selected work queue
 * runs out of TX resources — signalling the MAC layer to hold further
 * traffic until the driver re-enables transmit updates.
 */
mblk_t *
oce_m_send(void *arg, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	mblk_t *nxt_pkt;
	mblk_t *rmp = NULL;
	struct oce_wq *wq;

	DEV_LOCK(dev);
	if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
		/* Device not ready: drop the whole chain. */
		DEV_UNLOCK(dev);
		freemsg(mp);
		return (NULL);
	}
	DEV_UNLOCK(dev);

	/*
	 * Hash to pick a wq
	 */
	wq = oce_get_wq(dev, mp);

	while (mp != NULL) {
		/* Save the Pointer since mp will be freed in case of copy */
		nxt_pkt = mp->b_next;
		mp->b_next = NULL;
		/* Send each packet individually on the chosen wq. */
		rmp = oce_send_packet(wq, mp);
		if (rmp != NULL) {
			/*
			 * Out of TX resources: mark the wq for reschedule
			 * and arm its completion queue so an interrupt
			 * will restart transmission, then hand the unsent
			 * remainder back to the MAC layer.
			 */
			/* reschedule Tx */
			wq->resched = B_TRUE;
			oce_arm_cq(dev, wq->cq->cq_id, 0, B_TRUE);
			/* restore the chain */
			rmp->b_next = nxt_pkt;
			break;
		}
		mp = nxt_pkt;
	}
	return (rmp);
} /* oce_send */
/*
 * Per-ring transmit entry point (mr_send(9E)): send a chain of message
 * blocks on the work queue bound to ring_handle.
 *
 * Returns NULL when the chain was fully consumed (transmitted, or
 * dropped because the device is suspended), or the head of the unsent
 * remainder when the work queue runs out of TX resources.
 */
mblk_t *
oce_ring_tx(void *ring_handle, mblk_t *mp)
{
	struct oce_wq *wq = ring_handle;
	mblk_t *nxt_pkt;
	mblk_t *rmp = NULL;
	struct oce_dev *dev = wq->parent;

	if (dev->suspended) {
		/* Device suspended: drop the whole chain. */
		freemsg(mp);
		return (NULL);
	}

	while (mp != NULL) {
		/* Save the Pointer since mp will be freed in case of copy */
		nxt_pkt = mp->b_next;
		mp->b_next = NULL;
		/* Send each packet individually on this ring's wq. */
		rmp = oce_send_packet(wq, mp);
		if (rmp != NULL) {
			/* restore the chain */
			rmp->b_next = nxt_pkt;
			break;
		}
		mp = nxt_pkt;
	}

	if (wq->resched) {
		/*
		 * A TX-resource shortage was recorded on this wq.  If it
		 * is currently in polling mode, atomically flip it back
		 * to interrupt mode; the CAS succeeding for exactly one
		 * caller ensures only that caller re-arms the completion
		 * queue and stamps the arm time.
		 */
		if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL,
		    OCE_MODE_INTR) == OCE_MODE_POLL) {
			oce_arm_cq(wq->parent, wq->cq->cq_id, 0, B_TRUE);
			wq->last_armed = ddi_get_lbolt();
		}
	}

	return (rmp);
}