Example no. 1
/* Receive side checksum and other offloads */
static void
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
{
	/* Check for hardware stripped VLAN tag */
	if (rcd->ts) {
		rxm->ol_flags |= PKT_RX_VLAN_PKT;
		rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
	}

	/* Check for RSS */
	if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
		rxm->ol_flags |= PKT_RX_RSS_HASH;
		rxm->hash.rss = rcd->rssHash;
	}

	/* Check packet type and checksum errors; only IPv4 is supported for now. */
	if (rcd->v4) {
		struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
		struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);

		if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
			rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
		else
			rxm->packet_type = RTE_PTYPE_L3_IPV4;

		if (!rcd->cnc) {
			if (!rcd->ipc)
				rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;

			if ((rcd->tcp || rcd->udp) && !rcd->tuc)
				rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}
}
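A minimal usage sketch, for illustration only: the wrapper below assumes a single-buffer packet and finishes basic mbuf setup before calling vmxnet3_rx_offload(); example_finish_rx() and its parameters are hypothetical names, not part of the driver.

/* Illustrative sketch only: complete basic mbuf fields for a single-buffer
 * packet, then let vmxnet3_rx_offload() fill VLAN, RSS and checksum flags.
 * example_finish_rx() is a hypothetical helper, not a driver function. */
static void
example_finish_rx(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm,
		  uint16_t port_id)
{
	rxm->nb_segs = 1;
	rxm->next = NULL;
	rxm->pkt_len = (uint16_t)rcd->len;
	rxm->data_len = (uint16_t)rcd->len;
	rxm->port = port_id;
	rxm->ol_flags = 0;
	rxm->vlan_tci = 0;
	rxm->packet_type = RTE_PTYPE_UNKNOWN;

	vmxnet3_rx_offload(rcd, rxm);
}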
Example no. 2
struct nfp_nsp_identify *
__nfp_nsp_identify(struct nfp_nsp *nsp)
{
	struct nfp_nsp_identify *nspi = NULL;
	struct nsp_identify *ni;
	int ret;

	if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
		return NULL;

	ni = malloc(sizeof(*ni));
	if (!ni)
		return NULL;

	memset(ni, 0, sizeof(*ni));
	ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
	if (ret < 0) {
		printf("reading bsp version failed %d\n",
			ret);
		goto exit_free;
	}

	nspi = malloc(sizeof(*nspi));
	if (!nspi)
		goto exit_free;

	memset(nspi, 0, sizeof(*nspi));
	memcpy(nspi->version, ni->version, sizeof(nspi->version));
	nspi->version[sizeof(nspi->version) - 1] = '\0';
	nspi->flags = ni->flags;
	nspi->br_primary = ni->br_primary;
	nspi->br_secondary = ni->br_secondary;
	nspi->br_nsp = ni->br_nsp;
	nspi->primary = rte_le_to_cpu_16(ni->primary);
	nspi->secondary = rte_le_to_cpu_16(ni->secondary);
	nspi->nsp = rte_le_to_cpu_16(ni->nsp);
	nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask);

exit_free:
	free(ni);
	return nspi;
}
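For context, a short caller sketch under the assumption that the usual nfp_nsp_open()/nfp_nsp_close() helpers are available; example_print_bsp_version() is a hypothetical name used only for illustration.

/* Hypothetical caller: open the NSP, read the identify block, print the
 * BSP version string and release the result.  Assumes nfp_nsp_open() and
 * nfp_nsp_close() with the usual open/close semantics. */
static void
example_print_bsp_version(struct nfp_cpp *cpp)
{
	struct nfp_nsp *nsp;
	struct nfp_nsp_identify *nspi;

	nsp = nfp_nsp_open(cpp);
	if (!nsp)
		return;

	nspi = __nfp_nsp_identify(nsp);
	nfp_nsp_close(nsp);

	if (nspi) {
		printf("BSP version: %s\n", nspi->version);
		free(nspi);
	}
}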
Example no. 3
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
			    struct i40e_pf_vf *vf,
			    struct i40e_virtchnl_txq_info *txq)
{
	int err = I40E_SUCCESS;
	struct i40e_hmc_obj_txq tx_ctx;
	uint32_t qtx_ctl;
	uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;


	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));
	tx_ctx.new_context = 1;
	tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	tx_ctx.qlen = txq->ring_len;
	tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
	err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
	if (err != I40E_SUCCESS)
		return err;

	err = i40e_set_lan_tx_queue_context(hw, abs_queue_id, &tx_ctx);
	if (err != I40E_SUCCESS)
		return err;

	/* Bind the queue to the VF. Since TX/RX queues appear in pairs,
	 * only QTX_CTL needs to be set.
	 */
	qtx_ctl = (I40E_QTX_CTL_VF_QUEUE << I40E_QTX_CTL_PFVF_Q_SHIFT) |
				((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
				I40E_QTX_CTL_PF_INDX_MASK) |
				(((vf->vf_idx + hw->func_caps.vf_base_id) <<
				I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				I40E_QTX_CTL_VFVM_INDX_MASK);
	I40E_WRITE_REG(hw, I40E_QTX_CTL(abs_queue_id), qtx_ctl);
	I40E_WRITE_FLUSH(hw);

	return I40E_SUCCESS;
}
Example no. 4
/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx;
	uint32_t nb_rxd, idx;
	uint8_t ring_idx;
	vmxnet3_rx_queue_t *rxq;
	Vmxnet3_RxCompDesc *rcd;
	vmxnet3_buf_info_t *rbi;
	Vmxnet3_RxDesc *rxd;
	struct rte_mbuf *rxm = NULL;
	struct vmxnet3_hw *hw;

	nb_rx = 0;
	ring_idx = 0;
	nb_rxd = 0;
	idx = 0;

	rxq = rx_queue;
	hw = rxq->hw;

	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

	if (rxq->stopped) {
		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
		return 0;
	}

	while (rcd->gen == rxq->comp_ring.gen) {

		if (nb_rx >= nb_pkts)
			break;
		idx = rcd->rxdIdx;
		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

		if (rcd->sop != 1 || rcd->eop != 1) {
			rte_pktmbuf_free_seg(rbi->m);

			PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
			goto rcd_done;

		} else {

			PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(rcd->len <= rxd->len);
			VMXNET3_ASSERT(rbi->m);
#endif
			if (rcd->len == 0) {
				PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
					   ring_idx, idx);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
				VMXNET3_ASSERT(rcd->sop && rcd->eop);
#endif
				rte_pktmbuf_free_seg(rbi->m);

				goto rcd_done;
			}

			/* Assuming the packet arrives in a single buffer */
			if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
				PMD_RX_LOG(DEBUG,
					   "Alert: Misbehaving device, incorrect"
					   " buffer type used. Packet dropped.");
				rte_pktmbuf_free_seg(rbi->m);
				goto rcd_done;
			}
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
#endif
			/* Get the packet buffer pointer from buf_info */
			rxm = rbi->m;

			/* Clear descriptor associated buf_info to be reused */
			rbi->m = NULL;
			rbi->bufPA = 0;

			/* Update the index that we received a packet */
			rxq->cmd_ring[ring_idx].next2comp = idx;

			/* For RCD with EOP set, check if there is frame error */
			if (rcd->err) {
				rxq->stats.drop_total++;
				rxq->stats.drop_err++;

				if (!rcd->fcs) {
					rxq->stats.drop_fcs++;
					PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
				}
				PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
					   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
						 rxq->comp_ring.base), rcd->rxdIdx);
				rte_pktmbuf_free_seg(rxm);

				goto rcd_done;
			}

			/* Check for hardware stripped VLAN tag */
			if (rcd->ts) {
				PMD_RX_LOG(DEBUG, "Received packet with VLAN ID: %d.",
					   rcd->tci);
				rxm->ol_flags = PKT_RX_VLAN_PKT;

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
				VMXNET3_ASSERT(rxm &&
					       rte_pktmbuf_mtod(rxm, void *));
#endif
				/* Copy vlan tag in packet buffer */
				rxm->pkt.vlan_macip.f.vlan_tci =
					rte_le_to_cpu_16((uint16_t)rcd->tci);

			} else {
				rxm->ol_flags = 0;
				rxm->pkt.vlan_macip.f.vlan_tci = 0;
			}

			/* Initialize newly received packet buffer */
			rxm->pkt.in_port = rxq->port_id;
			rxm->pkt.nb_segs = 1;
			rxm->pkt.next = NULL;
			rxm->pkt.pkt_len = (uint16_t)rcd->len;
			rxm->pkt.data_len = (uint16_t)rcd->len;
			rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

			rx_pkts[nb_rx++] = rxm;

rcd_done:
			rxq->cmd_ring[ring_idx].next2comp = idx;
			VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
						  rxq->cmd_ring[ring_idx].size);

			/* It's time to allocate some new buf and renew descriptors */
			vmxnet3_post_rx_bufs(rxq, ring_idx);
			if (unlikely(rxq->shared->ctrl.updateRxProd)) {
				VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
						       rxq->cmd_ring[ring_idx].next2fill);
			}

			/* Advance to the next descriptor in comp_ring */
			vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

			rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
			nb_rxd++;
			if (nb_rxd > rxq->cmd_ring[0].size) {
				PMD_RX_LOG(ERR,
					   "Used up quota of receiving packets,"
					   " relinquish control.");
				break;
			}
		}
	}

	return nb_rx;
}
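For completeness, a burst receive routine like the one above is normally installed as the ethdev receive callback during device initialization; below is a minimal sketch with all other init steps omitted (example_eth_dev_init() is a hypothetical name).

/* Sketch: hook the burst routine into the ethdev.  rx_pkt_burst is the
 * standard rte_eth_dev callback slot; everything else is left out. */
static int
example_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	eth_dev->rx_pkt_burst = vmxnet3_recv_pkts;
	/* ... remaining device initialization ... */
	return 0;
}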