Example #1
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp,
                           vmxnet3_offload_t *ol,
                           mblk_t *mp)
{
   int ret = 0;
   uint32_t start, stuff, value, flags, lso_flag, mss;

   ol->om = VMXNET3_OM_NONE;
   ol->hlen = 0;
   ol->msscof = 0;

   hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
   mac_lso_get(mp, &mss, &lso_flag);

   if (flags || lso_flag) {
      struct ether_vlan_header *eth = (void *) mp->b_rptr;
      uint8_t ethLen;

      if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
         ethLen = sizeof(struct ether_vlan_header);
      } else {
         ethLen = sizeof(struct ether_header);
      }

      VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
                            flags,      ethLen,    start,    stuff,    value);

      if (lso_flag & HW_LSO) {
         mblk_t *mblk = mp;
         uint8_t *ip, *tcp;
         uint8_t ipLen, tcpLen;

         /*
          * Copy e1000g's behavior:
          * - Do not assume all the headers are in the same mblk.
          * - Assume each header is always within one mblk.
          * - Assume the ethernet header is in the first mblk.
          */
         ip = mblk->b_rptr + ethLen;
         if (ip >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            ip = mblk->b_rptr;
         }
         ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
         tcp = ip + ipLen;
         if (tcp >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            tcp = mblk->b_rptr;
         }
         tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
         if (tcp + tcpLen > mblk->b_wptr) { // careful, '>' instead of '>=' here
            mblk = mblk->b_cont;
         }

         ol->om = VMXNET3_OM_TSO;
         ol->hlen = ethLen + ipLen + tcpLen;
         ol->msscof = mss;

         if (mblk != mp) {
            ret = ol->hlen;
         }
      } else if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }

   }

   return ret;
}
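
For reference, here is a minimal sketch of the offload context that this routine fills in. The field names are taken from the code above; the exact types and widths in the real vmxnet3 headers may differ.

typedef struct vmxnet3_offload {
   uint16_t om;      /* offload mode: VMXNET3_OM_NONE, _CSUM or _TSO */
   uint16_t hlen;    /* CSUM: offset where checksumming starts (eth + start)
                        TSO: length of the eth + IP + TCP headers */
   uint16_t msscof;  /* CSUM: offset of the checksum field (eth + stuff)
                        TSO: the MSS */
} vmxnet3_offload_t;
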
Example #2
/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_prepare_offload --
 *
 *    Build the offload context of a msg.
 *
 * Results:
 *    0 if everything went well.
 *    +n if n bytes need to be pulled up.
 *    -1 in case of error (not used).
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
static int
vmxnet3_tx_prepare_offload(vmxnet3_softc_t *dp,
                           vmxnet3_offload_t *ol,
                           mblk_t *mp)
{
   int ret = 0;
   uint32_t start, stuff, value, flags;
#if defined(OPEN_SOLARIS) || defined(SOL11)
   uint32_t lso_flag, mss;
#endif

   ol->om = VMXNET3_OM_NONE;
   ol->hlen = 0;
   ol->msscof = 0;

   hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);
#if defined(OPEN_SOLARIS) || defined(SOL11)
   mac_lso_get(mp, &mss, &lso_flag);

   if (flags || lso_flag) {
#else
   if (flags) {
#endif
      struct ether_vlan_header *eth = (void *) mp->b_rptr;
      uint8_t ethLen;

      if (eth->ether_tpid == htons(ETHERTYPE_VLAN)) {
         ethLen = sizeof(struct ether_vlan_header);
      } else {
         ethLen = sizeof(struct ether_header);
      }

      VMXNET3_DEBUG(dp, 4, "flags=0x%x, ethLen=%u, start=%u, stuff=%u, value=%u\n",
                            flags,      ethLen,    start,    stuff,    value);

#if defined(OPEN_SOLARIS) || defined(SOL11)
      if (lso_flag & HW_LSO) {
#else
      if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
      if (flags & HW_LSO) {
#endif
         mblk_t *mblk = mp;
         uint8_t *ip, *tcp;
         uint8_t ipLen, tcpLen;

         /*
          * Copy e1000g's behavior:
          * - Do not assume all the headers are in the same mblk.
          * - Assume each header is always within one mblk.
          * - Assume the ethernet header is in the first mblk.
          */
         ip = mblk->b_rptr + ethLen;
         if (ip >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            ip = mblk->b_rptr;
         }
         ipLen = IPH_HDR_LENGTH((ipha_t *) ip);
         tcp = ip + ipLen;
         if (tcp >= mblk->b_wptr) {
            mblk = mblk->b_cont;
            tcp = mblk->b_rptr;
         }
         tcpLen = TCP_HDR_LENGTH((tcph_t *) tcp);
         if (tcp + tcpLen > mblk->b_wptr) { // careful, '>' instead of '>=' here
            mblk = mblk->b_cont;
         }

         ol->om = VMXNET3_OM_TSO;
         ol->hlen = ethLen + ipLen + tcpLen;
#if defined(OPEN_SOLARIS) || defined(SOL11)
         ol->msscof = mss;
#else
         /* OpenSolaris fills 'value' with the MSS but Solaris doesn't. */
         ol->msscof = DB_LSOMSS(mp);
#endif
         if (mblk != mp) {
            ret = ol->hlen;
         }
      }
#if defined(OPEN_SOLARIS) || defined(SOL11)
      else if (flags & HCK_PARTIALCKSUM) {
         ol->om = VMXNET3_OM_CSUM;
         ol->hlen = start + ethLen;
         ol->msscof = stuff + ethLen;
      }
#endif
   }

   return ret;
}
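
The VLAN check at the top of this routine is repeated in every variant shown here. A possible refactoring (not part of the original driver) that factors it out, under the same assumption that the whole ethernet header sits in the first mblk:

static uint8_t
vmxnet3_tx_ether_header_len(const mblk_t *mp)
{
   const struct ether_vlan_header *eth = (const void *) mp->b_rptr;

   /* A tagged frame carries the larger ether_vlan_header. */
   return eth->ether_tpid == htons(ETHERTYPE_VLAN) ?
          sizeof(struct ether_vlan_header) :
          sizeof(struct ether_header);
}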

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx_one --
 *
 *    Map a msg into the Tx command ring of a vmxnet3 device.
 *
 * Results:
 *    VMXNET3_TX_OK if everything went well.
 *    VMXNET3_TX_RINGFULL if the ring is nearly full.
 *    VMXNET3_TX_PULLUP if the msg is overfragmented.
 *    VMXNET3_TX_FAILURE if there was a DMA or offload error.
 *
 * Side effects:
 *    The ring is filled if VMXNET3_TX_OK is returned.
 *
 *---------------------------------------------------------------------------
 */
static vmxnet3_txstatus
vmxnet3_tx_one(vmxnet3_softc_t *dp,
               vmxnet3_txqueue_t *txq,
               vmxnet3_offload_t *ol,
               mblk_t *mp,
               boolean_t retry)
{
   int ret = VMXNET3_TX_OK;
   unsigned int frags = 0, totLen = 0;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   Vmxnet3_GenericDesc *txDesc;
   uint16_t sopIdx, eopIdx;
   uint8_t sopGen, curGen;
   mblk_t *mblk;

   mutex_enter(&dp->txLock);

   sopIdx = eopIdx = cmdRing->next2fill;
   sopGen = cmdRing->gen;
   curGen = !cmdRing->gen;

   for (mblk = mp; mblk != NULL; mblk = mblk->b_cont) {
      unsigned int len = MBLKL(mblk);
      ddi_dma_cookie_t cookie;
      uint_t cookieCount;

      if (len) {
         totLen += len;
      } else {
         continue;
      }

      if (ddi_dma_addr_bind_handle(dp->txDmaHandle, NULL,
                                   (caddr_t) mblk->b_rptr, len,
                                   DDI_DMA_RDWR | DDI_DMA_STREAMING,
                                   DDI_DMA_DONTWAIT, NULL,
                                   &cookie, &cookieCount) != DDI_DMA_MAPPED) {
         VMXNET3_WARN(dp, "ddi_dma_addr_bind_handle() failed\n");
         ret = VMXNET3_TX_FAILURE;
         goto error;
      }

      ASSERT(cookieCount);

      do {
         uint64_t addr = cookie.dmac_laddress;
         size_t len = cookie.dmac_size;

         do {
            uint32_t dw2, dw3;
            size_t chunkLen;

            ASSERT(!txq->metaRing[eopIdx].mp);
            ASSERT(cmdRing->avail - frags);

            if (frags >= cmdRing->size - 1 ||
                (ol->om != VMXNET3_OM_TSO && frags >= VMXNET3_MAX_TXD_PER_PKT)) {

               if (retry) {
                  VMXNET3_DEBUG(dp, 2, "overfragmented, frags=%u ring=%hu om=%hu\n",
                                frags, cmdRing->size, ol->om);
               }
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_PULLUP;
               goto error;
            }
            if (cmdRing->avail - frags <= 1) {
               dp->txMustResched = B_TRUE;
               ddi_dma_unbind_handle(dp->txDmaHandle);
               ret = VMXNET3_TX_RINGFULL;
               goto error;
            }

            if (len > VMXNET3_MAX_TX_BUF_SIZE) {
               chunkLen = VMXNET3_MAX_TX_BUF_SIZE;
            } else {
               chunkLen = len;
            }

            frags++;
            eopIdx = cmdRing->next2fill;

            txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
            ASSERT(txDesc->txd.gen != cmdRing->gen);

            // txd.addr
            txDesc->txd.addr = addr;
            // txd.dw2
            dw2 = chunkLen == VMXNET3_MAX_TX_BUF_SIZE ? 0 : chunkLen;
            dw2 |= curGen << VMXNET3_TXD_GEN_SHIFT;
            txDesc->dword[2] = dw2;
            ASSERT(txDesc->txd.len == len || txDesc->txd.len == 0);
            // txd.dw3
            dw3 = 0;
            txDesc->dword[3] = dw3;

            VMXNET3_INC_RING_IDX(cmdRing, cmdRing->next2fill);
            curGen = cmdRing->gen;

            addr += chunkLen;
            len -= chunkLen;
         } while (len);

         if (--cookieCount) {
            ddi_dma_nextcookie(dp->txDmaHandle, &cookie);
         }
      } while (cookieCount);

      ddi_dma_unbind_handle(dp->txDmaHandle);
   }

   /* Update the EOP descriptor */
   txDesc = VMXNET3_GET_DESC(cmdRing, eopIdx);
   txDesc->dword[3] |= VMXNET3_TXD_CQ | VMXNET3_TXD_EOP;

   /* Update the SOP descriptor. Must be done last */
   txDesc = VMXNET3_GET_DESC(cmdRing, sopIdx);
   if (ol->om == VMXNET3_OM_TSO &&
       txDesc->txd.len != 0 &&
       txDesc->txd.len < ol->hlen) {
      ret = VMXNET3_TX_FAILURE;
      goto error;
   }
   txDesc->txd.om = ol->om;
   txDesc->txd.hlen = ol->hlen;
   txDesc->txd.msscof = ol->msscof;
   membar_producer();
   txDesc->txd.gen = sopGen;

   /* Update the meta ring & metadata */
   txq->metaRing[sopIdx].mp = mp;
   txq->metaRing[eopIdx].sopIdx = sopIdx;
   txq->metaRing[eopIdx].frags = frags;
   cmdRing->avail -= frags;
   if (ol->om == VMXNET3_OM_TSO) {
      txqCtrl->txNumDeferred +=
         (totLen - ol->hlen + ol->msscof - 1) / ol->msscof;
   } else {
      txqCtrl->txNumDeferred++;
   }

   VMXNET3_DEBUG(dp, 3, "tx 0x%p on [%u;%u]\n", mp, sopIdx, eopIdx);

   goto done;

error:
   /* Reverse the generation bits */
   while (sopIdx != cmdRing->next2fill) {
      VMXNET3_DEC_RING_IDX(cmdRing, cmdRing->next2fill);
      txDesc = VMXNET3_GET_DESC(cmdRing, cmdRing->next2fill);
      txDesc->txd.gen = !cmdRing->gen;
   }

done:
   mutex_exit(&dp->txLock);

   return ret;
}
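
The ring-index macros used above are not shown in this excerpt. A plausible sketch of their behavior, inferred from the error path (the real definitions in the driver's headers may differ): the generation bit flips each time the index wraps, which is how driver and device tell fresh descriptors from stale ones.

#define VMXNET3_INC_RING_IDX(ring, idx)         \
   do {                                         \
      if (++(idx) == (ring)->size) {            \
         (idx) = 0;                             \
         (ring)->gen ^= 1;                      \
      }                                         \
   } while (0)

#define VMXNET3_DEC_RING_IDX(ring, idx)         \
   do {                                         \
      if ((idx) == 0) {                         \
         (idx) = (ring)->size - 1;              \
         (ring)->gen ^= 1;                      \
      } else {                                  \
         (idx)--;                               \
      }                                         \
   } while (0)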

/*
 *---------------------------------------------------------------------------
 *
 * vmxnet3_tx --
 *
 *    Send packets on a vmxnet3 device.
 *
 * Results:
 *    NULL in case of success or failure.
 *    The mps to be retransmitted later if the ring is full.
 *
 * Side effects:
 *    None.
 *
 *---------------------------------------------------------------------------
 */
mblk_t *
vmxnet3_tx(void *data, mblk_t *mps)
{
   vmxnet3_softc_t *dp = data;
   vmxnet3_txqueue_t *txq = &dp->txQueue;
   vmxnet3_cmdring_t *cmdRing = &txq->cmdRing;
   Vmxnet3_TxQueueCtrl *txqCtrl = txq->sharedCtrl;
   vmxnet3_txstatus status = VMXNET3_TX_OK;
   mblk_t *mp;

   ASSERT(mps != NULL);

   do {
      vmxnet3_offload_t ol;
      int pullup;

      mp = mps;
      mps = mp->b_next;
      mp->b_next = NULL;

      if (DB_TYPE(mp) != M_DATA) {
         /*
          * PR #315560: Solaris might pass M_PROTO mblks for some reason.
          * Drop them because we don't understand them and because their
          * contents are not Ethernet frames anyway.
          */
         ASSERT(B_FALSE);
         freemsg(mp);
         continue;
      }

      /*
       * Prepare the offload while we're still handling the original
       * message -- msgpullup() discards the metadata afterwards.
       */
      pullup = vmxnet3_tx_prepare_offload(dp, &ol, mp);
      if (pullup) {
         mblk_t *new_mp = msgpullup(mp, pullup);
         freemsg(mp);
         if (new_mp) {
            mp = new_mp;
         } else {
            continue;
         }
      }

      /*
       * Try to map the message in the Tx ring.
       * This call might fail for non-fatal reasons.
       */
      status = vmxnet3_tx_one(dp, txq, &ol, mp, B_FALSE);
      if (status == VMXNET3_TX_PULLUP) {
         /*
          * Try one more time after flattening
          * the message with msgpullup().
          */
         if (mp->b_cont != NULL) {
            mblk_t *new_mp = msgpullup(mp, -1);
            freemsg(mp);
            if (new_mp) {
               mp = new_mp;
               status = vmxnet3_tx_one(dp, txq, &ol, mp, B_TRUE);
            } else {
               continue;
            }
         }
      }
      if (status != VMXNET3_TX_OK && status != VMXNET3_TX_RINGFULL) {
         /* Fatal failure, drop it */
         freemsg(mp);
      }
   } while (mps && status != VMXNET3_TX_RINGFULL);

   if (status == VMXNET3_TX_RINGFULL) {
      mp->b_next = mps;
      mps = mp;
   } else {
      ASSERT(!mps);
   }

   /* Notify the device */
   mutex_enter(&dp->txLock);
   if (txqCtrl->txNumDeferred >= txqCtrl->txThreshold) {
      txqCtrl->txNumDeferred = 0;
      VMXNET3_BAR0_PUT32(dp, VMXNET3_REG_TXPROD, cmdRing->next2fill);
   }
   mutex_exit(&dp->txLock);

   return mps;
}
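
Note how the final doorbell is batched: vmxnet3_tx_one charges a plain packet as one deferred descriptor and a TSO packet as the number of wire segments it will produce, a ceiling division of the TCP payload by the MSS. For example, a 29254-byte TSO message with 54 bytes of headers and an MSS of 1460 adds (29254 - 54 + 1459) / 1460 = 20 to txNumDeferred, and VMXNET3_REG_TXPROD is written only once txNumDeferred reaches txThreshold.
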
Example #3
/*
 * igb_get_tx_context
 *
 * Get the tx context information from the mblk
 */
static int
igb_get_tx_context(mblk_t *mp, tx_context_t *ctx)
{
	uint32_t start;
	uint32_t flags;
	uint32_t lso_flag;
	uint32_t mss;
	uint32_t len;
	uint32_t size;
	uint32_t offset;
	unsigned char *pos;
	ushort_t etype;
	uint32_t mac_hdr_len;
	uint32_t l4_proto;
	uint32_t l4_hdr_len;

	ASSERT(mp != NULL);

	mac_hcksum_get(mp, &start, NULL, NULL, NULL, &flags);
	bzero(ctx, sizeof (tx_context_t));

	ctx->hcksum_flags = flags;

	if (flags == 0)
		return (TX_CXT_SUCCESS);

	mac_lso_get(mp, &mss, &lso_flag);
	ctx->mss = mss;
	ctx->lso_flag = (lso_flag == HW_LSO);

	/*
	 * LSO relies on tx h/w checksum, so here the packet will be
	 * dropped if the h/w checksum flags are not set.
	 */
	if (ctx->lso_flag) {
		if (!((ctx->hcksum_flags & HCK_PARTIALCKSUM) &&
		    (ctx->hcksum_flags & HCK_IPV4_HDRCKSUM))) {
			IGB_DEBUGLOG_0(NULL, "igb_tx: h/w "
			    "checksum flags are not set for LSO");
			return (TX_CXT_E_LSO_CSUM);
		}
	}

	etype = 0;
	mac_hdr_len = 0;
	l4_proto = 0;

	/*
	 * First, get the position of the ether_type/ether_tpid.
	 * Here we don't assume the ether (VLAN) header is fully included
	 * in one mblk fragment, so we go through the fragments to parse
	 * the ether type.
	 */
	size = len = MBLKL(mp);
	offset = offsetof(struct ether_header, ether_type);
	while (size <= offset) {
		mp = mp->b_cont;
		ASSERT(mp != NULL);
		len = MBLKL(mp);
		size += len;
	}
	pos = mp->b_rptr + offset + len - size;

	etype = ntohs(*(ushort_t *)(uintptr_t)pos);
	if (etype == ETHERTYPE_VLAN) {
		/*
		 * Get the position of the ether_type in VLAN header
		 */
		offset = offsetof(struct ether_vlan_header, ether_type);
		while (size <= offset) {
			mp = mp->b_cont;
			ASSERT(mp != NULL);
			len = MBLKL(mp);
			size += len;
		}
		pos = mp->b_rptr + offset + len - size;

		etype = ntohs(*(ushort_t *)(uintptr_t)pos);
		mac_hdr_len = sizeof (struct ether_vlan_header);
	} else {
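
The excerpt above stops short, but the cross-fragment lookup it uses twice can be summarized as a standalone helper. A minimal sketch under the same assumptions as the igb code (the helper name is hypothetical and not part of the driver):

static unsigned char *
mblk_field_ptr(mblk_t *mp, uint32_t offset)
{
	uint32_t len = MBLKL(mp);
	uint32_t size = len;

	/* Walk the chain until the running byte count covers 'offset'. */
	while (size <= offset) {
		mp = mp->b_cont;
		ASSERT(mp != NULL);
		len = MBLKL(mp);
		size += len;
	}
	/* 'size - len' is the offset of this fragment's first byte. */
	return (mp->b_rptr + offset - (size - len));
}
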
Example #4
/* ARGSUSED */
static int
vmxnet3s_tx_prepare_offload(vmxnet3s_softc_t *dp, vmxnet3s_offload_t *ol,
    mblk_t *mp, int *to_copy)
{
	int		ret = 0;
	uint32_t	start;
	uint32_t	stuff;
	uint32_t	value;
	uint32_t	flags;
	uint32_t	lsoflags;
	uint32_t	mss;

	ol->om = VMXNET3_OM_NONE;
	ol->hlen = 0;
	ol->msscof = 0;

	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, NULL, &value, &flags);

	mac_lso_get(mp, &mss, &lsoflags);
	if (lsoflags & HW_LSO)
		flags |= HW_LSO;

	if (flags) {
		struct ether_vlan_header *eth = (void *) mp->b_rptr;
		uint8_t		ethlen;

		if (eth->ether_tpid == htons(ETHERTYPE_VLAN))
			ethlen = sizeof (struct ether_vlan_header);
		else
			ethlen = sizeof (struct ether_header);

		if (flags & HCK_PARTIALCKSUM) {
			ol->om = VMXNET3_OM_CSUM;
			ol->hlen = start + ethlen;
			ol->msscof = stuff + ethlen;
		}
		if (flags & HW_LSO) {
			mblk_t		*mblk = mp;
			uint8_t		*ip;
			uint8_t		*tcp;
			uint8_t		iplen;
			uint8_t		tcplen;

			/*
			 * Copy e1000g's behavior:
			 * - Do not assume all the headers are in the same mblk.
			 * - Assume each header is always within one mblk.
			 * - Assume the ethernet header is in the first mblk.
			 */
			ip = mblk->b_rptr + ethlen;
			if (ip >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				ip = mblk->b_rptr;
			}
			iplen = IPH_HDR_LENGTH((ipha_t *)ip);
			tcp = ip + iplen;
			if (tcp >= mblk->b_wptr) {
				mblk = mblk->b_cont;
				tcp = mblk->b_rptr;
			}
			tcplen = TCP_HDR_LENGTH((tcph_t *)tcp);
			/* Careful, '>' instead of '>=' here */
			if (tcp + tcplen > mblk->b_wptr)
				mblk = mblk->b_cont;

			ol->om = VMXNET3_OM_TSO;
			ol->hlen = ethlen + iplen + tcplen;
			ol->msscof = DB_LSOMSS(mp);

			if (mblk != mp)
				ret = ol->hlen;
			else
				*to_copy = ol->hlen;
		}
	}

	return (ret);
}
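
This variant reports headers that already sit in the first mblk through *to_copy instead of the return value. A hypothetical caller sketch (the function name and the copy policy are assumptions, not the actual driver code):

static mblk_t *
vmxnet3s_tx_flatten_if_needed(vmxnet3s_softc_t *dp, vmxnet3s_offload_t *ol,
    mblk_t *mp, int *to_copy)
{
	int pullup = vmxnet3s_tx_prepare_offload(dp, ol, mp, to_copy);

	if (pullup > 0) {
		/* Headers span several mblks: flatten them first. */
		mblk_t *new_mp = msgpullup(mp, pullup);

		freemsg(mp);
		return (new_mp);	/* NULL tells the caller to drop */
	}
	/*
	 * Otherwise *to_copy holds the contiguous header length, which a
	 * caller may bcopy into a bounce buffer rather than DMA-bind.
	 */
	return (mp);
}
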
Example #5
/*
 * function to xmit a single packet over the wire
 *
 * wq - pointer to WQ
 * mp - Pointer to packet chain
 *
 * return pointer to the packet
 */
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged   = B_FALSE;
	uint16_t  vlan_tag;
	uint32_t  reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adap priv struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}

	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the mapped segments to what we support */
	if (num_mblks  > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);

	/* If the entire packet is smaller than the copy limit, just copy it */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drop the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}

	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;
		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));

	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* ring the tx doorbell */
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);
	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if copied or packet chain collapsed */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}
	return (NULL);

wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
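
The HCK_FULLCKSUM block above indexes the IPv4 protocol field with the magic offset 9. A possible rewrite of that block (not in the original driver) using the ipha_t definition from <inet/ip.h>, under the same assumption the original makes, namely that ETHERTYPE_IP implies a well-formed IPv4 header at ip_offset:

	if (csum_flags & HCK_FULLCKSUM) {
		if (etype == ETHERTYPE_IP) {
			ipha_t *ipha = (ipha_t *)(void *)
			    (mp->b_rptr + ip_offset);

			/* ipha_protocol is byte 9 of the IPv4 header. */
			if (ipha->ipha_protocol == IPPROTO_TCP)
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (ipha->ipha_protocol == IPPROTO_UDP)
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}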