/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_complete --
 *
 *    Parse a transmit queue and complete packets.
 *
 * Results:
 *    B_TRUE if Tx must be updated or B_FALSE if no action is required.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
boolean_t
vmxnet3_tx_complete(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq)
{
    vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
    vmxnet3_compring_t *compRing = &txq->compRing;
    Vmxnet3_GenericDesc *compDesc;
    boolean_t completedTx = B_FALSE;
    boolean_t ret = B_FALSE;

    mutex_enter(&dp->txLock);

    compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
    /* A completion entry belongs to us while its gen bit matches ours. */
    while (compDesc->tcd.gen == compRing->gen) {
        vmxnet3_metatx_t *sopMetaDesc, *eopMetaDesc;
        uint16_t sopIdx, eopIdx;
        mblk_t *mp;

        /*
         * The device reports the index of the EOP descriptor; the meta
         * entry there points back at the SOP, which owns the mblk.
         */
        eopIdx = compDesc->tcd.txdIdx;
        eopMetaDesc = &txq->metaRing[eopIdx];
        sopIdx = eopMetaDesc->sopIdx;
        sopMetaDesc = &txq->metaRing[sopIdx];

        /* Return every fragment of the packet to the command ring. */
        ASSERT(eopMetaDesc->frags);
        cmdRing->avail += eopMetaDesc->frags;

        ASSERT(sopMetaDesc->mp);
        mp = sopMetaDesc->mp;
        /*
         * Log before freemsg(): after the free the pointer value is
         * indeterminate and must not be read, even just to print it.
         * Use the VMXNET3_DEBUG macro for consistency with the rest
         * of the driver.
         */
        VMXNET3_DEBUG(dp, 3, "cp 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);
        freemsg(mp);

        eopMetaDesc->sopIdx = 0;
        eopMetaDesc->frags = 0;
        sopMetaDesc->mp = NULL;

        completedTx = B_TRUE;

        VMXNET3_INC_RING_IDX(compRing, compRing->next2comp);
        compDesc = VMXNET3_GET_DESC(compRing, compRing->next2comp);
    }

    /* Only request a resched if a prior Tx stalled AND we freed space. */
    if (dp->txMustResched && completedTx) {
        dp->txMustResched = B_FALSE;
        ret = B_TRUE;
    }

    mutex_exit(&dp->txLock);

    return (ret);
}
/*
 * Parse a transmit queue and complete packets.
 *
 * Walks the Tx completion ring while descriptors carry our generation
 * bit, returning their fragments to the command ring and freeing the
 * associated mblk chains.  Returns B_TRUE when transmission had stalled
 * (txmustresched) and space was reclaimed here, so the caller should
 * restart Tx; B_FALSE otherwise.
 */
boolean_t
vmxnet3s_tx_complete(vmxnet3s_softc_t *dp, vmxnet3s_txq_t *txq)
{
    vmxnet3s_cmdring_t *cmdring = &txq->cmdring;
    vmxnet3s_compring_t *compring = &txq->compring;
    vmxnet3s_gendesc_t *compdesc;
    boolean_t completedtx = B_FALSE;
    boolean_t ret = B_FALSE;

    mutex_enter(&dp->txlock);

    compdesc = VMXNET3_GET_DESC(compring, compring->next2comp);
    /* A completion entry is ours while its gen bit matches the ring's. */
    while (compdesc->tcd.gen == compring->gen) {
        vmxnet3s_metatx_t *sopmetadesc, *eopmetadesc;
        uint16_t sopidx;
        uint16_t eopidx;
        mblk_t *mp;

        /*
         * The device reports the EOP descriptor index; its meta entry
         * records the SOP index, which in turn owns the mblk chain.
         */
        eopidx = compdesc->tcd.txdidx;
        eopmetadesc = &txq->metaring[eopidx];
        sopidx = eopmetadesc->sopidx;
        sopmetadesc = &txq->metaring[sopidx];

        /* Give all of this packet's fragments back to the cmd ring. */
        ASSERT(eopmetadesc->frags);
        cmdring->avail += eopmetadesc->frags;

        ASSERT(sopmetadesc->mp);
        mp = sopmetadesc->mp;
        freemsg(mp);

        /* Reset the meta entries for reuse. */
        eopmetadesc->sopidx = 0;
        eopmetadesc->frags = 0;
        sopmetadesc->mp = NULL;

        completedtx = B_TRUE;

        VMXNET3_INC_RING_IDX(compring, compring->next2comp);
        compdesc = VMXNET3_GET_DESC(compring, compring->next2comp);
    }

    /* Only ask for a Tx restart if we actually reclaimed descriptors. */
    if (dp->txmustresched && completedtx) {
        dp->txmustresched = B_FALSE;
        ret = B_TRUE;
    }

    mutex_exit(&dp->txlock);

    return (ret);
}
/*
 * Populate a Rx descriptor with a new rxbuf.
 *
 * Returns DDI_SUCCESS when a fresh buffer could be obtained and the
 * descriptor at 'idx' was handed back to the device, DDI_FAILURE when
 * no buffer was available.  'cansleep' is forwarded to the buffer
 * allocator.
 */
static int
vmxnet3s_rx_populate(vmxnet3s_softc_t *dp, vmxnet3s_rxq_t *rxq, uint16_t idx,
    boolean_t cansleep)
{
    vmxnet3s_rxbuf_t *newbuf;
    vmxnet3s_cmdring_t *ring;
    vmxnet3s_gendesc_t *desc;

    newbuf = vmxnet3s_get_rxbuf(dp, cansleep);
    if (newbuf == NULL)
        return (DDI_FAILURE);

    ring = &rxq->cmdring;
    desc = VMXNET3_GET_DESC(ring, idx);

    rxq->bufring[idx].rxbuf = newbuf;
    desc->rxd.addr = newbuf->dma.bufpa;
    desc->rxd.len = newbuf->dma.buflen;
    /* desc->rxd.btype = 0; */
    /* Publish addr/len before flipping the gen bit over to the device. */
    membar_producer();
    desc->rxd.gen = ring->gen;

    return (DDI_SUCCESS);
}
/* *--------------------------------------------------------------------------- * * vmxnet3_tx_one -- * * Map a msg into the Tx command ring of a vmxnet3 device. * * Results: * VMXNET3_TX_OK if everything went well. * VMXNET3_TX_RINGFULL if the ring is nearly full. * VMXNET3_TX_PULLUP if the msg is overfragmented. * VMXNET3_TX_FAILURE if there was a DMA or offload error. * * Side effects: * The ring is filled if VMXNET3_TX_OK is returned. * *--------------------------------------------------------------------------- */ static vmxnet3_txstatus vmxnet3_tx_one(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq, vmxnet3_offload_t *ol, mblk_t *mp, boolean_t retry) { int ret = VMXNET3_TX_OK; unsigned int frags = 0, totLen = 0; vmxnet3_cmdring_t *cmdRing = &txq->cmdRing; Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl; Vmxnet3_GenericDesc *txDesc; uint16_t sopIdx, eopIdx; uint8_t sopGen, curGen; mblk_t *mblk; mutex_enter(&dp->txLock); sopIdx = eopIdx = cmdRing->next2fill; sopGen = cmdRing->gen; curGen = !cmdRing->gen; for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) { unsigned int len = MBLKL(mblk); ddi_dma_cookie_t cookie; uint_t cookieCount; if (len) { totLen += len; } else { continue; } if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL, (caddr_t) mblk->b_rptr, len, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL, &cookie, &cookieCount) != DDI_DMA_MAPPED) { VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n"); ret = VMXNET3_TX_FAILURE; goto error; } ASSERT(cookieCount); do { uint64_t addr = cookie.dmac_laddress; size_t len = cookie.dmac_size; do { uint32_t dw2, dw3; size_t chunkLen; ASSERT(!txq->metaRing[eopIdx].mp); ASSERT(cmdRing->avail - frags); if (frags >= cmdRing->size - 1 || (ol->om != VMXNET3_OM_TSO && frags >= VMXNET3_MAX_TXD_PER_PKT)) { if (retry) { VMXNET3_DEBUG(dp, 2, "overfragmented, frags=%u ring=%hu om=%hu\n", frags, cmdRing->size, ol->om); } ddi_dma_unbind_handle(dp->txDmaHandle); ret = VMXNET3_TX_PULLUP; goto error; } if (cmdRing->avail - frags 
<= 1) { dp->txMustResched = B_TRUE; ddi_dma_unbind_handle(dp->txDmaHandle); ret = VMXNET3_TX_RINGFULL; goto error; } if (len > VMXNET3_MAX_TX_BUF_SIZE) { chunkLen = VMXNET3_MAX_TX_BUF_SIZE; } else { chunkLen = len; } frags++; eopIdx = cmdRing->next2fill; txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx); ASSERT(txDesc->txd.gen != cmdRing->gen); // txd.addr txDesc->txd.addr = addr; // txd.dw2 dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ? 0 : chunkLen; dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT; txDesc->dword[2] = dw2; ASSERT(txDesc->txd.len == len || txDesc->txd.len == 0); // txd.dw3 dw3 = 0; txDesc->dword[3] = dw3; VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill); curGen = cmdRing->gen; addr += chunkLen; len -= chunkLen; } while (len); if (--cookieCount) { ddi_dma_nextcookie(dp->txDmaHandle, &cookie); } } while (cookieCount); ddi_dma_unbind_handle(dp->txDmaHandle); } /* Update the EOP descriptor */ txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx); txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; /* Update the SOP descriptor. 
Must be done last */ txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx); if (ol->om == VMXNET3_OM_TSO && txDesc->txd.len != 0 && txDesc->txd.len < ol->hlen) { ret = VMXNET3_TX_FAILURE; goto error; } txDesc->txd.om = ol->om; txDesc->txd.hlen = ol->hlen; txDesc->txd.msscof = ol->msscof; membar_producer(); txDesc->txd.gen = sopGen; /* Update the meta ring & metadata */ txq->metaRing[sopIdx].mp = mp; txq->metaRing[eopIdx].sopIdx = sopIdx; txq->metaRing[eopIdx].frags = frags; cmdRing->avail -= frags; if (ol->om == VMXNET3_OM_TSO) { txqCtrl->txNumDeferred += (totLen - ol->hlen + ol->msscof - 1) / ol->msscof; } else { txqCtrl->txNumDeferred++; } VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx); goto done; error: /* Reverse the generation bits */ while (sopIdx != cmdRing->next2fill) { VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill); txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill); txDesc->txd.gen = !cmdRing->gen; } done: mutex_exit(&dp->txLock); return ret; }
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp, vmxnet3_offload_t *ol,
    mblk_t *mp)
{
    int ret = 0;
    uint32_t start, stuff, value, flags;
#if defined(OPEN_SOLARIS) || defined(SOL11)
    uint32_t lso_flag, mss;
#endif

    /* Default: no offload requested. */
    ol->om = VMXNET3_OM_NONE;
    ol->hlen = 0;
    ol->msscof = 0;

    hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
#if defined(OPEN_SOLARIS) || defined(SOL11)
    /* Newer stacks report LSO separately from the checksum flags. */
    mac_lso_get(mp, &mss, &lso_flag);
    if (flags || lso_flag) {
#else
    if (flags) {
#endif
        struct ether_vlan_header *eth = (void *) mp->b_rptr;
        uint8_t ethLen;

        if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
            ethLen = sizeof(struct ether_vlan_header);
        } else {
            ethLen = sizeof(struct ether_header);
        }

        VMXNET3_DEBUG(dp, 4,
            "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
            flags, ethLen, start, stuff, value);

#if defined(OPEN_SOLARIS) || defined(SOL11)
        if (lso_flag & HW_LSO) {
#else
        if (flags & HCK_PARTIALCKSUM) {
            ol->om = VMXNET3_OM_CSUM;
            ol->hlen = start + ethLen;
            ol->msscof = stuff + ethLen;
        }
        if (flags & HW_LSO) {
#endif
            mblk_t *mblk = mp;
            uint8_t *ip, *tcp;
            uint8_t ipLen, tcpLen;

            /*
             * Copy e1000g's behavior:
             * - Do not assume all the headers are in the same mblk.
             * - Assume each header is always within one mblk.
             * - Assume the ethernet header is in the first mblk.
             */
            ip = mblk->b_rptr + ethLen;
            if (ip >= mblk->b_wptr) {
                mblk = mblk->b_cont;
                ip = mblk->b_rptr;
            }
            ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
            tcp = ip + ipLen;
            if (tcp >= mblk->b_wptr) {
                mblk = mblk->b_cont;
                tcp = mblk->b_rptr;
            }
            tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
            /* careful, '>' instead of '>=' here */
            if (tcp + tcpLen > mblk->b_wptr) {
                mblk = mblk->b_cont;
            }

            ol->om = VMXNET3_OM_TSO;
            ol->hlen = ethLen + ipLen + tcpLen;
#if defined(OPEN_SOLARIS) || defined(SOL11)
            ol->msscof = mss;
#else
            /* OpenSolaris fills 'value' with the MSS but Solaris doesn't. */
            ol->msscof = DB_LSOMSS(mp);
#endif
            /*
             * If the headers straddled an mblk boundary, ask the
             * caller to pull up hlen bytes into one contiguous block.
             */
            if (mblk != mp) {
                ret = ol->hlen;
            }
        }
#if defined(OPEN_SOLARIS) || defined(SOL11)
        else if (flags & HCK_PARTIALCKSUM) {
            ol->om = VMXNET3_OM_CSUM;
            ol->hlen = start + ethLen;
            ol->msscof = stuff + ethLen;
        }
#endif
    }

    return ret;
}

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_one --
 *
 *    Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Results:
 *    VMXNET3_TX_OK if everything went well.
 *    VMXNET3_TX_RINGFULL if the ring is nearly full.
 *    VMXNET3_TX_PULLUP if the msg is overfragmented.
 *    VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *    The ring is filled if VMXNET3_TX_OK is returned.
 *
 *---------------------------------------------------------------------------
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp, vmxnet3_txqueue_t *txq,
    vmxnet3_offload_t *ol, mblk_t *mp, boolean_t retry)
{
    int ret = VMXNET3_TX_OK;
    unsigned int frags = 0, totLen = 0;
    vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
    Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
    Vmxnet3_GenericDesc *txDesc;
    uint16_t sopIdx, eopIdx;
    uint8_t sopGen, curGen;
    mblk_t *mblk;

    mutex_enter(&dp->txLock);

    sopIdx = eopIdx = cmdRing->next2fill;
    sopGen = cmdRing->gen;
    /*
     * Non-SOP descriptors carry the inverted gen bit so the device does
     * not see the packet until the SOP descriptor is published last.
     */
    curGen = !cmdRing->gen;

    for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
        unsigned int len = MBLKL(mblk);
        ddi_dma_cookie_t cookie;
        uint_t cookieCount;

        if (len) {
            totLen += len;
        } else {
            continue;
        }

        if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
            (caddr_t) mblk->b_rptr, len,
            DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
            &cookie, &cookieCount) != DDI_DMA_MAPPED) {
            VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
            ret = VMXNET3_TX_FAILURE;
            goto error;
        }
        ASSERT(cookieCount);

        do {
            uint64_t addr = cookie.dmac_laddress;
            /* NOTE(review): this 'len' shadows the outer mblk length. */
            size_t len = cookie.dmac_size;

            do {
                uint32_t dw2, dw3;
                size_t chunkLen;

                ASSERT(!txq->metaRing[eopIdx].mp);
                ASSERT(cmdRing->avail - frags);

                /*
                 * Non-TSO packets are capped at
                 * VMXNET3_MAX_TXD_PER_PKT descriptors; any packet is
                 * capped at the ring size minus one.
                 */
                if (frags >= cmdRing->size - 1 ||
                    (ol->om != VMXNET3_OM_TSO &&
                    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
                    if (retry) {
                        VMXNET3_DEBUG(dp, 2,
                            "overfragmented, frags=%u ring=%hu om=%hu\n",
                            frags, cmdRing->size, ol->om);
                    }
                    ddi_dma_unbind_handle(dp->txDmaHandle);
                    ret = VMXNET3_TX_PULLUP;
                    goto error;
                }
                if (cmdRing->avail - frags <= 1) {
                    dp->txMustResched = B_TRUE;
                    ddi_dma_unbind_handle(dp->txDmaHandle);
                    ret = VMXNET3_TX_RINGFULL;
                    goto error;
                }

                /* Split DMA cookies bigger than the device max. */
                if (len > VMXNET3_MAX_TX_BUF_SIZE) {
                    chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
                } else {
                    chunkLen = len;
                }

                frags++;
                eopIdx = cmdRing->next2fill;

                txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
                ASSERT(txDesc->txd.gen != cmdRing->gen);

                /* txd.addr */
                txDesc->txd.addr = addr;
                /* txd.dw2: a length of MAX is encoded as 0 */
                dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ? 0 : chunkLen;
                dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
                txDesc->dword[2] = dw2;
                ASSERT(txDesc->txd.len == len || txDesc->txd.len == 0);
                /* txd.dw3 */
                dw3 = 0;
                txDesc->dword[3] = dw3;

                VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
                curGen = cmdRing->gen;

                addr += chunkLen;
                len -= chunkLen;
            } while (len);

            if (--cookieCount) {
                ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
            }
        } while (cookieCount);

        ddi_dma_unbind_handle(dp->txDmaHandle);
    }

    /* Update the EOP descriptor */
    txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
    txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

    /* Update the SOP descriptor. Must be done last */
    txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
    if (ol->om == VMXNET3_OM_TSO && txDesc->txd.len != 0 &&
        txDesc->txd.len < ol->hlen) {
        ret = VMXNET3_TX_FAILURE;
        goto error;
    }
    txDesc->txd.om = ol->om;
    txDesc->txd.hlen = ol->hlen;
    txDesc->txd.msscof = ol->msscof;
    /* Descriptor contents must be globally visible before the gen flip. */
    membar_producer();
    txDesc->txd.gen = sopGen;

    /* Update the meta ring & metadata */
    txq->metaRing[sopIdx].mp = mp;
    txq->metaRing[eopIdx].sopIdx = sopIdx;
    txq->metaRing[eopIdx].frags = frags;
    cmdRing->avail -= frags;
    if (ol->om == VMXNET3_OM_TSO) {
        txqCtrl->txNumDeferred +=
            (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
    } else {
        txqCtrl->txNumDeferred++;
    }

    VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);

    goto done;

error:
    /* Reverse the generation bits */
    while (sopIdx != cmdRing->next2fill) {
        VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
        txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
        txDesc->txd.gen = !cmdRing->gen;
    }

done:
    mutex_exit(&dp->txLock);

    return ret;
}

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
    vmxnet3_softc_t *dp = data;
    vmxnet3_txqueue_t *txq = &dp->txQueue;
    vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
    Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
    vmxnet3_txstatus status = VMXNET3_TX_OK;
    mblk_t *mp;

    ASSERT(mps != NULL);

    do {
        vmxnet3_offload_t ol;
        int pullup;

        /* Detach the head message from the chain. */
        mp = mps;
        mps = mp->b_next;
        mp->b_next = NULL;

        if (DB_TYPE(mp) != M_DATA) {
            /*
             * PR #315560: Solaris might pass M_PROTO mblks for some reason.
             * Drop them because we don't understand them and because their
             * contents are not Ethernet frames anyway.
             */
            ASSERT(B_FALSE);
            freemsg(mp);
            continue;
        }

        /*
         * Prepare the offload while we're still handling the original
         * message -- msgpullup() discards the metadata afterwards.
         */
        pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
        if (pullup) {
            mblk_t *new_mp = msgpullup(mp, pullup);
            freemsg(mp);
            if (new_mp) {
                mp = new_mp;
            } else {
                /* Allocation failure: drop this packet. */
                continue;
            }
        }

        /*
         * Try to map the message in the Tx ring.
         * This call might fail for non-fatal reasons.
         */
        status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
        if (status == VMXNET3_TX_PULLUP) {
            /*
             * Try one more time after flattening
             * the message with msgpullup().
             */
            if (mp->b_cont != NULL) {
                mblk_t *new_mp = msgpullup(mp, -1);
                freemsg(mp);
                if (new_mp) {
                    mp = new_mp;
                    status = vmxnet3_tx_one(dp, txq, &ol, mp, B_TRUE);
                } else {
                    continue;
                }
            }
        }
        if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
            /* Fatal failure, drop it */
            freemsg(mp);
        }
    } while (mps && status != VMXNET3_TX_RINGFULL);

    if (status == VMXNET3_TX_RINGFULL) {
        /* Re-chain the unsent message so GLD can retransmit it later. */
        mp->b_next = mps;
        mps = mp;
    } else {
        ASSERT(!mps);
    }

    /* Notify the device */
    mutex_enter(&dp->txLock);
    if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
        txqCtrl->txNumDeferred = 0;
        VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
    }
    mutex_exit(&dp->txLock);

    return mps;
}
/*
 * Interrupt handler for Rx. Look if there are any pending Rx and
 * put them in mplist.
 *
 * Walks the Rx completion ring, chaining each packet's fragments into
 * one mblk chain ('mp', via 'mptail') and every complete, error-free
 * packet into 'mplist' (via 'mplisttail'), which is returned to the
 * caller.  Must be called with dp->intrlock held.
 *
 * Fix: the per-packet tail pointer initializer had been corrupted by a
 * character-entity mangling (the address-of expression had turned into
 * a Unicode minus-plus sign, which does not compile); restored to
 * taking the address of 'mp'.
 */
mblk_t *
vmxnet3s_rx_intr(vmxnet3s_softc_t *dp, vmxnet3s_rxq_t *rxq)
{
    vmxnet3s_compring_t *compring = &rxq->compring;
    vmxnet3s_cmdring_t *cmdring = &rxq->cmdring;
    vmxnet3s_rxqctrl_t *rxqctrl = rxq->sharedctrl;
    vmxnet3s_gendesc_t *compdesc;
    mblk_t *mplist = NULL;
    mblk_t **mplisttail = &mplist;

    ASSERT(mutex_owned(&dp->intrlock));

    compdesc = VMXNET3_GET_DESC(compring, compring->next2comp);
    while (compdesc->rcd.gen == compring->gen) {
        mblk_t *mp = NULL;
        mblk_t **mptail = &mp;  /* restored from mangled source */
        boolean_t mpvalid = B_TRUE;
        boolean_t eop;

        /* The outer loop always starts on a start-of-packet entry. */
        ASSERT(compdesc->rcd.sop);

        do {
            uint16_t rxdidx = compdesc->rcd.rxdidx;
            vmxnet3s_rxbuf_t *rxbuf = rxq->bufring[rxdidx].rxbuf;
            mblk_t *mblk = rxbuf->mblk;
            vmxnet3s_gendesc_t *rxdesc;

            while (compdesc->rcd.gen != compring->gen) {
                /*
                 * H/W may be still be in the middle of
                 * generating this entry, so hold on until
                 * the gen bit is flipped.
                 */
                membar_consumer();
            }
            ASSERT(compdesc->rcd.gen == compring->gen);
            ASSERT(rxbuf);
            ASSERT(mblk);

            /* Some Rx descriptors may have been skipped */
            while (cmdring->next2fill != rxdidx) {
                rxdesc = VMXNET3_GET_DESC(cmdring,
                    cmdring->next2fill);
                rxdesc->rxd.gen = cmdring->gen;
                VMXNET3_INC_RING_IDX(cmdring, cmdring->next2fill);
            }

            eop = compdesc->rcd.eop;

            /*
             * Now we have a piece of the packet in the rxdidx
             * descriptor. Grab it only if we achieve to replace
             * it with a fresh buffer.
             */
            if (vmxnet3s_rx_populate(dp, rxq, rxdidx, B_FALSE) ==
                DDI_SUCCESS) {
                /* Success, we can chain the mblk with the mp */
                mblk->b_wptr = mblk->b_rptr + compdesc->rcd.len;
                *mptail = mblk;
                mptail = &mblk->b_cont;
                ASSERT(*mptail == NULL);

                if (eop) {
                    if (!compdesc->rcd.err) {
                        /*
                         * Tag the mp if it was
                         * checksummed by the H/W
                         */
                        vmxnet3s_rx_hwcksum(dp, mp, compdesc);
                    } else {
                        mpvalid = B_FALSE;
                    }
                }
            } else {
                /*
                 * Keep the same buffer, we still need to flip
                 * the gen bit
                 */
                rxdesc = VMXNET3_GET_DESC(cmdring, rxdidx);
                rxdesc->rxd.gen = cmdring->gen;
                mpvalid = B_FALSE;
            }

            VMXNET3_INC_RING_IDX(compring, compring->next2comp);
            VMXNET3_INC_RING_IDX(cmdring, cmdring->next2fill);
            compdesc = VMXNET3_GET_DESC(compring,
                compring->next2comp);
        } while (!eop);

        if (mp) {
            if (mpvalid) {
                *mplisttail = mp;
                mplisttail = &mp->b_next;
                ASSERT(*mplisttail == NULL);
            } else {
                /* This message got holes, drop it */
                freemsg(mp);
            }
        }
    }

    if (rxqctrl->updaterxprod) {
        uint32_t rxprod;

        /*
         * All buffers are actually available, but we can't tell that to
         * the device because it may interpret that as an empty ring.
         * So skip one buffer.
         */
        if (cmdring->next2fill)
            rxprod = cmdring->next2fill - 1;
        else
            rxprod = cmdring->size - 1;

        VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_RXPROD, rxprod);
    }

    return (mplist);
}
/*
 * Map a msg into the Tx command ring of a vmxnet3s device.
 *
 * 'to_copy' is the number of leading bytes (headers) to bcopy into the
 * pre-allocated txcache node instead of DMA-binding them; the remainder
 * of the chain is DMA-bound fragment by fragment.
 *
 * Returns VMXNET3_TX_OK on success, VMXNET3_TX_RINGFULL when the ring
 * is (nearly) full, VMXNET3_TX_PULLUP when the msg is overfragmented,
 * or VMXNET3_TX_FAILURE on a DMA error.  On any error path the
 * partially-written descriptors have their gen bits reverted.
 */
static vmxnet3s_txstatus_t
vmxnet3s_tx_one(vmxnet3s_softc_t *dp, vmxnet3s_txq_t *txq,
    vmxnet3s_offload_t *ol, mblk_t *mp, int to_copy)
{
    int ret = VMXNET3_TX_OK;
    uint_t frags = 0, totlen = 0;
    vmxnet3s_cmdring_t *cmdring = &txq->cmdring;
    vmxnet3s_txqctrl_t *txqctrl = txq->sharedctrl;
    vmxnet3s_gendesc_t *txdesc;
    uint16_t sopidx;
    uint16_t eopidx;
    uint8_t sopgen;
    uint8_t curgen;
    mblk_t *mblk;
    uint_t len;
    size_t offset = 0;

    mutex_enter(&dp->txlock);

    sopidx = eopidx = cmdring->next2fill;
    sopgen = cmdring->gen;
    /*
     * Non-SOP descriptors carry the inverted gen bit; the SOP is
     * published last so the device never sees a half-written packet.
     */
    curgen = !cmdring->gen;

    mblk = mp;
    len = MBLKL(mblk);

    if (to_copy) {
        uint32_t dw2;
        uint32_t dw3;

        /* Headers must fit inside the first mblk. */
        ASSERT(len >= to_copy);

        if (cmdring->avail <= 1) {
            dp->txmustresched = B_TRUE;
            ret = VMXNET3_TX_RINGFULL;
            goto error;
        }

        /* Consume the copied bytes; DMA-bind the rest from 'offset'. */
        totlen += to_copy;
        len -= to_copy;
        offset = to_copy;

        bcopy(mblk->b_rptr, dp->txcache.nodes[sopidx].va, to_copy);

        eopidx = cmdring->next2fill;
        txdesc = VMXNET3_GET_DESC(cmdring, eopidx);
        ASSERT(txdesc->txd.gen != cmdring->gen);
        /* Point the first descriptor at the txcache copy. */
        txdesc->txd.addr = dp->txcache.nodes[sopidx].pa;
        dw2 = to_copy;
        dw2 |= curgen << VMXNET3_TXD_GEN_SHIFT;
        txdesc->dword[2] = dw2;
        ASSERT(txdesc->txd.len == to_copy || txdesc->txd.len == 0);
        dw3 = 0;
        txdesc->dword[3] = dw3;
        VMXNET3_INC_RING_IDX(cmdring, cmdring->next2fill);
        curgen = cmdring->gen;
        frags++;
    }

    /*
     * Walk the rest of the chain; 'len' and 'offset' are re-derived at
     * the top of each iteration (offset is only non-zero for the first
     * mblk, to skip the bytes copied above).
     */
    for (; mblk != NULL; mblk = mblk->b_cont, len = mblk ?
        MBLKL(mblk) : 0, offset = 0) {
        ddi_dma_cookie_t cookie;
        uint_t cookiecount;

        if (len)
            totlen += len;
        else
            continue;

        if (ddi_dma_addr_bind_handle(dp->txdmahdl, NULL,
            (caddr_t)mblk->b_rptr + offset, len,
            DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
            &cookie, &cookiecount) != DDI_DMA_MAPPED) {
            ret = VMXNET3_TX_FAILURE;
            goto error;
        }
        ASSERT(cookiecount);

        do {
            uint64_t addr = cookie.dmac_laddress;
            /* NOTE(review): shadows the outer mblk 'len'. */
            size_t len = cookie.dmac_size;

            do {
                uint32_t dw2;
                uint32_t dw3;
                size_t chunklen;

                ASSERT(!txq->metaring[eopidx].mp);
                ASSERT(cmdring->avail - frags);

                /*
                 * Non-TSO packets may use at most
                 * VMXNET3_MAX_TXD_PER_PKT descriptors; no packet
                 * may fill the whole ring.
                 */
                if (frags >= cmdring->size - 1 ||
                    (ol->om != VMXNET3_OM_TSO &&
                    frags >= VMXNET3_MAX_TXD_PER_PKT)) {
                    (void) ddi_dma_unbind_handle(
                        dp->txdmahdl);
                    ret = VMXNET3_TX_PULLUP;
                    goto error;
                }
                if (cmdring->avail - frags <= 1) {
                    dp->txmustresched = B_TRUE;
                    (void) ddi_dma_unbind_handle(
                        dp->txdmahdl);
                    ret = VMXNET3_TX_RINGFULL;
                    goto error;
                }

                /* Split cookies larger than the device max. */
                if (len > VMXNET3_MAX_TX_BUF_SIZE)
                    chunklen = VMXNET3_MAX_TX_BUF_SIZE;
                else
                    chunklen = len;

                frags++;
                eopidx = cmdring->next2fill;

                txdesc = VMXNET3_GET_DESC(cmdring, eopidx);
                ASSERT(txdesc->txd.gen != cmdring->gen);

                /* txd.addr */
                txdesc->txd.addr = addr;
                /* txd.dw2: a length of MAX is encoded as 0 */
                dw2 = chunklen == VMXNET3_MAX_TX_BUF_SIZE ?
                    0 : chunklen;
                dw2 |= curgen << VMXNET3_TXD_GEN_SHIFT;
                txdesc->dword[2] = dw2;
                ASSERT(txdesc->txd.len == len ||
                    txdesc->txd.len == 0);
                /* txd.dw3 */
                dw3 = 0;
                txdesc->dword[3] = dw3;

                VMXNET3_INC_RING_IDX(cmdring,
                    cmdring->next2fill);
                curgen = cmdring->gen;

                addr += chunklen;
                len -= chunklen;
            } while (len);

            if (--cookiecount)
                ddi_dma_nextcookie(dp->txdmahdl, &cookie);
        } while (cookiecount);

        (void) ddi_dma_unbind_handle(dp->txdmahdl);
    }

    /* Update the EOP descriptor */
    txdesc = VMXNET3_GET_DESC(cmdring, eopidx);
    txdesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

    /* Update the SOP descriptor. Must be done last */
    txdesc = VMXNET3_GET_DESC(cmdring, sopidx);
    if (ol->om == VMXNET3_OM_TSO && txdesc->txd.len != 0 &&
        txdesc->txd.len < ol->hlen) {
        ret = VMXNET3_TX_FAILURE;
        goto error;
    }
    txdesc->txd.om = ol->om;
    txdesc->txd.hlen = ol->hlen;
    txdesc->txd.msscof = ol->msscof;
    /* Make descriptor contents visible before flipping the SOP gen. */
    membar_producer();
    txdesc->txd.gen = sopgen;

    /* Update the meta ring & metadata */
    txq->metaring[sopidx].mp = mp;
    txq->metaring[eopidx].sopidx = sopidx;
    txq->metaring[eopidx].frags = frags;
    cmdring->avail -= frags;
    if (ol->om == VMXNET3_OM_TSO) {
        txqctrl->txnumdeferred += (totlen - ol->hlen +
            ol->msscof - 1) / ol->msscof;
    } else {
        txqctrl->txnumdeferred++;
    }

    goto done;

error:
    /* Reverse the generation bits */
    while (sopidx != cmdring->next2fill) {
        VMXNET3_DEC_RING_IDX(cmdring, cmdring->next2fill);
        txdesc = VMXNET3_GET_DESC(cmdring, cmdring->next2fill);
        txdesc->txd.gen = !cmdring->gen;
    }

done:
    mutex_exit(&dp->txlock);

    return (ret);
}