Example #1
/*
 * test data manipulation in mbuf with non-ascii data
 */
static int
test_pktmbuf_with_non_ascii_data(void)
{
	struct rte_mbuf *m = NULL;
	char *data;

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0xff, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);

	rte_pktmbuf_free(m);

	return 0;

fail:
	if (m)
		rte_pktmbuf_free(m);
	return -1;
}
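The test above relies on a packet mbuf pool named pktmbuf_pool and a GOTO_FAIL macro that are defined elsewhere in the test harness. A minimal sketch of plausible definitions follows; the pool sizes, the MBUF_TEST_DATA_LEN value and the macro wording are assumptions, not taken from the original code.

#include <stdio.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define MBUF_TEST_DATA_LEN 1464                 /* assumed test payload size */
#define NB_MBUF            512                  /* assumed pool size */
#define MBUF_DATA_SIZE     RTE_MBUF_DEFAULT_BUF_SIZE

/* print a diagnostic and jump to the common cleanup label */
#define GOTO_FAIL(str, ...) do {                                        \
		printf("test failed (line %d): " str "\n",              \
		       __LINE__, ##__VA_ARGS__);                        \
		goto fail;                                              \
	} while (0)

static struct rte_mempool *pktmbuf_pool;

/* rte_pktmbuf_pool_create() is available since DPDK 1.8 */
static int
setup_pktmbuf_pool(void)
{
	pktmbuf_pool = rte_pktmbuf_pool_create("test_pool", NB_MBUF,
			32 /* cache */, 0 /* priv */, MBUF_DATA_SIZE,
			rte_socket_id());
	return (pktmbuf_pool == NULL) ? -1 : 0;
}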
Example #2
/* Sends 'num_pkts' request messages (and, for packet messages, the corresponding 'packets' data) to the datapath. */
int
dpdk_link_send_bulk(struct dpif_dpdk_message *request,
                    const struct ofpbuf *const *packets, size_t num_pkts)
{
    struct rte_mbuf *mbufs[PKT_BURST_SIZE] = {NULL};
    uint8_t *mbuf_data = NULL;
    int i = 0;
    int ret = 0;

    if (num_pkts > PKT_BURST_SIZE) {
        return EINVAL;
    }

    DPDK_DEBUG()

    for (i = 0; i < num_pkts; i++) {
        mbufs[i] = rte_pktmbuf_alloc(mp);

        if (!mbufs[i]) {
            while (i--) {   /* free mbufs already allocated in this batch */
                rte_pktmbuf_free(mbufs[i]);
            }
            return ENOBUFS;
        }

        mbuf_data = rte_pktmbuf_mtod(mbufs[i], uint8_t *);
        rte_memcpy(mbuf_data, &request[i], sizeof(request[i]));

        if (request->type == DPIF_DPDK_PACKET_FAMILY) {
            mbuf_data = mbuf_data + sizeof(request[i]);
            if (likely(packets[i]->size <= (mbufs[i]->buf_len - sizeof(request[i])))) {
                rte_memcpy(mbuf_data, packets[i]->data, packets[i]->size);
                rte_pktmbuf_data_len(mbufs[i]) =
                    sizeof(request[i]) + packets[i]->size;
                rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
            } else {
                RTE_LOG(ERR, APP, "%s, %d: %s", __FUNCTION__, __LINE__,
                        "memcpy prevented: packet size exceeds available mbuf space");
                for (i = 0; i < num_pkts; i++) {
                    rte_pktmbuf_free(mbufs[i]);
                }
                return ENOMEM;
            }
        } else {
            rte_pktmbuf_data_len(mbufs[i]) = sizeof(request[i]);
            rte_pktmbuf_pkt_len(mbufs[i]) = rte_pktmbuf_data_len(mbufs[i]);
        }
    }

    ret = rte_ring_sp_enqueue_bulk(message_ring, (void * const *)mbufs, num_pkts);
    if (ret == -ENOBUFS) {
        for (i = 0; i < num_pkts; i++) {
            rte_pktmbuf_free(mbufs[i]);
        }
        ret = ENOBUFS;
    } else if (unlikely(ret == -EDQUOT)) {
        ret = EDQUOT;
    }

    return ret;
}
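For context, a consumer on the other side of message_ring would dequeue the mbufs and read the dpif_dpdk_message header back from the front of the data area. The sketch below targets the same legacy three-argument ring API as the example above; dpdk_link_recv_one is a hypothetical helper name and the EAGAIN convention is an assumption.

#include <errno.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ring.h>

static int
dpdk_link_recv_one(struct rte_ring *ring, struct dpif_dpdk_message *msg_out)
{
    struct rte_mbuf *mbuf = NULL;

    /* legacy rte_ring_sc_dequeue(): 0 on success, -ENOENT when the ring is empty */
    if (rte_ring_sc_dequeue(ring, (void **)&mbuf) != 0) {
        return EAGAIN;
    }

    /* the sender copied the request header to the start of the mbuf data area */
    rte_memcpy(msg_out, rte_pktmbuf_mtod(mbuf, void *), sizeof(*msg_out));

    rte_pktmbuf_free(mbuf);
    return 0;
}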
Example #3
void dpdk_ipDeFragment(void *handle, struct rte_mbuf *m){
	/* the IP header sits at the start of the mbuf data area */
	struct ip *iphead = rte_pktmbuf_mtod(m, struct ip *);
	struct sk_buff s;
	/* payload starts right after the IP header */
	s.data = rte_pktmbuf_mtod(m, char *) + sizeof(struct ip);
	s.truesize = rte_pktmbuf_pkt_len(m) - sizeof(struct ip);
	ipDeFragment(handle, iphead, &s);
}
Example #4
int pcap_next_ex(pcap_t *p, struct pcap_pkthdr **pkt_header,
    const u_char **pkt_data)
{
    struct rte_mbuf* mbuf = NULL;
    int              len  = 0;

    if (p == NULL || pkt_header == NULL || pkt_data == NULL ||
        p->deviceId < 0 || p->deviceId >= RTE_MAX_ETHPORTS)
    {
        snprintf (errbuf_g, PCAP_ERRBUF_SIZE, "Invalid parameter");
        return DPDKPCAP_FAILURE;
    }

    debug("Receiving a packet on port %d\n", p->deviceId);

    while (!rte_eth_rx_burst(p->deviceId, 0, &mbuf, 1))
    {
    }

    len = rte_pktmbuf_pkt_len(mbuf);

    pktHeader_g.len = len;
    pktHeader_g.caplen = len;
    *pkt_header = &pktHeader_g;

    rte_memcpy((void*)data_g, rte_pktmbuf_mtod(mbuf, void*), len);
    *pkt_data = data_g;

    rte_pktmbuf_free(mbuf);

    return 1;
}
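The wrapper above depends on the globals errbuf_g, data_g and pktHeader_g, which are not shown. A minimal sketch of plausible definitions, with an assumed maximum capture length; in practice the rte_memcpy() in pcap_next_ex() should also be bounded by sizeof(data_g).

#include <pcap/pcap.h>

#define DPDKPCAP_FAILURE  (-1)                      /* assumed error value */
#define MAX_CAPTURE_LEN   65535                     /* assumed snap length */

static char   errbuf_g[PCAP_ERRBUF_SIZE];           /* last error message */
static u_char data_g[MAX_CAPTURE_LEN];              /* copy of the received frame */
static struct pcap_pkthdr pktHeader_g;              /* header handed back to the caller */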
Example #5
void dump_ring(CLQManager *a_pclsCLQ, char *a_pszQName, uint32_t a_unStartIdx, uint32_t a_unEndIdx)
{
	struct rte_ring *pstRing = NULL;
	struct rte_mbuf *m = NULL;
	int ret = 0;
	uint32_t unMask = 0;
	uint32_t unStartIdx = 0;
	uint32_t unEndIdx = 0;
	uint32_t unIdx = 0;

	ret = a_pclsCLQ->CreateRing(a_pszQName, &pstRing);
	if(ret != E_Q_EXIST)
	{
		printf("There is no Queue (%s) \n", a_pszQName);
		if(ret == 0)
		{
			/* the ring did not exist and was just created; remove it again */
			a_pclsCLQ->DeleteQueue(a_pszQName);
		}
		return ;
	}

	if(pstRing != NULL)
	{
		unMask = pstRing->prod.mask;
		unStartIdx = pstRing->cons.tail;
		unEndIdx = pstRing->prod.tail;

		if(a_unEndIdx > unEndIdx)
		{
			printf("Invalid End idx %u\n", a_unEndIdx);
			return ;
		}

		if(a_unEndIdx < a_unStartIdx)
		{
			printf("Invalid Start Idx %u, End Idx %u\n", a_unStartIdx, a_unEndIdx);
			return ;
		}

		if(a_unStartIdx)
			unStartIdx = a_unStartIdx;
		if(a_unEndIdx)
			unEndIdx = a_unEndIdx;

		printf("Start Idx %u, End Idx %u\n", unStartIdx, unEndIdx);

		for(uint32_t i = unStartIdx; i < unEndIdx ; i++)
		{
			unIdx = i & unMask ;
			m = (struct rte_mbuf*)pstRing->ring[unIdx];
			printf("idx : [%8u], total_len : [%5u], data_len : [%5u], seg_cnt : [%2u], Data : %s\n"
							, i
							, rte_pktmbuf_pkt_len(m)
							, rte_pktmbuf_data_len(m)
							, m->nb_segs
							, rte_pktmbuf_mtod(m, char*)
					);
		}
	}
}
Example #6
static inline struct rte_mbuf *ipaugenblick_get_from_shadow(int sock)
{
	struct rte_mbuf *mbuf = NULL;

	if(local_socket_descriptors[sock].shadow) {
		mbuf = local_socket_descriptors[sock].shadow;
		local_socket_descriptors[sock].shadow = NULL;
		rte_pktmbuf_data_len(mbuf) = local_socket_descriptors[sock].shadow_len_remainder;
		mbuf->data_off += local_socket_descriptors[sock].shadow_len_delievered;
		if(mbuf->next) {
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) + rte_pktmbuf_pkt_len(mbuf->next);
		}
		else {
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
		}
		mbuf->next = local_socket_descriptors[sock].shadow_next;
		local_socket_descriptors[sock].shadow_next = NULL;
	}
	return mbuf;
}
Example #7
/*
 * Function sends unmatched packets to vswitchd.
 */
void
send_packet_to_vswitchd(struct rte_mbuf *mbuf, struct dpdk_upcall *info)
{
	int rslt = 0;
	void *mbuf_ptr = NULL;
	const uint64_t dpif_send_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * DPIF_SEND_US;
	uint64_t cur_tsc = 0;
	uint64_t diff_tsc = 0;
	static uint64_t prev_tsc = 0;

	/* send one packet, delete information about segments */
	rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

	/* allocate space before the packet for the upcall info */
	mbuf_ptr = rte_pktmbuf_prepend(mbuf, sizeof(*info));

	if (mbuf_ptr == NULL) {
		printf("Cannot prepend upcall info\n");
		rte_pktmbuf_free(mbuf);
		stats_vswitch_tx_drop_increment(INC_BY_1);
		stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
		return;
	}

	rte_memcpy(mbuf_ptr, info, sizeof(*info));

	/* send the packet and the upcall info to the daemon */
	rslt = rte_ring_mp_enqueue(vswitchd_packet_ring, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			stats_vswitch_tx_drop_increment(INC_BY_1);
			stats_vport_tx_drop_increment(VSWITCHD, INC_BY_1);
			return;
		} else {
			stats_vport_overrun_increment(VSWITCHD, INC_BY_1);
		}
	}

	stats_vport_tx_increment(VSWITCHD, INC_BY_1);

	cur_tsc = rte_rdtsc();
	diff_tsc = cur_tsc - prev_tsc;
	prev_tsc = cur_tsc;
	/* Only signal the daemon if at least DPIF_SEND_US microseconds have passed */
	if (unlikely(diff_tsc > dpif_send_tsc))
		send_signal_to_dpif();
}
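The last few lines implement a TSC-based throttle so that send_signal_to_dpif() is only called when enough time has passed since the previous enqueue. The same pattern can be pulled into a small helper; the sketch below mirrors the logic above, and the helper name and interval argument are assumptions.

#include <stdint.h>
#include <rte_cycles.h>

/* Returns 1 when more than 'interval_us' microseconds elapsed since the
 * previous call (same throttle logic as in send_packet_to_vswitchd()). */
static inline int
throttle_elapsed(uint64_t interval_us)
{
	static uint64_t prev_tsc;
	const uint64_t interval_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * interval_us;
	uint64_t cur_tsc = rte_rdtsc();
	uint64_t diff_tsc = cur_tsc - prev_tsc;

	prev_tsc = cur_tsc;
	return diff_tsc > interval_tsc;
}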
Example #8
static struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_buffer_main_t * bm = vm->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = ((struct rte_mbuf *)b)-1;
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE(pkt_mb == 0))
	{
	  clib_warning ("Missing %d mbuf chain segment(s):   "
			"(nb_segs = %d, nb_segs_left = %d)!",
			nb_segs - nb_segs_left, nb_segs, nb_segs_left);
	  if (first_mb)
	    rte_pktmbuf_free(first_mb);
	  return NULL;
	}
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE(new_mb == 0))
	{
	  if (first_mb)
	    rte_pktmbuf_free(first_mb);
	  return NULL;
	}
      
      /*
       * Copy packet info into 1st segment.
       */
      if (first_mb == 0)
	{
	  first_mb = new_mb;
	  rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
	  first_mb->nb_segs = pkt_mb->nb_segs;
	  first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME // TX Offload support TBD
	  first_mb->vlan_macip = pkt_mb->vlan_macip;
	  first_mb->hash = pkt_mb->hash;
	  first_mb->ol_flags = pkt_mb->ol_flags;
#endif
	}
      else
	{
Example #9
static int
testclone_testupdate_testdetach(void)
{
#ifndef RTE_MBUF_SCATTER_GATHER
	return 0;
#else
	struct rte_mbuf *mc = NULL;
	struct rte_mbuf *clone = NULL;

	/* alloc a mbuf */

	mc = rte_pktmbuf_alloc(pktmbuf_pool);
	if (mc == NULL)
		GOTO_FAIL("ooops not allocating mbuf");

	if (rte_pktmbuf_pkt_len(mc) != 0)
		GOTO_FAIL("Bad length");


	/* clone the allocated mbuf */
	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");
	rte_pktmbuf_free(clone);

	mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
	if(mc->pkt.next == NULL)
		GOTO_FAIL("Next Pkt Null\n");

	clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
	if (clone == NULL)
		GOTO_FAIL("cannot clone data\n");

	/* free mbuf */
	rte_pktmbuf_free(mc);
	rte_pktmbuf_free(clone);
	mc = NULL;
	clone = NULL;
	return 0;

fail:
	if (mc)
		rte_pktmbuf_free(mc);
	return -1;
#endif /* RTE_MBUF_SCATTER_GATHER */
}
Example #10
static int
usock_mbuf_write(struct vr_usocket *usockp, struct rte_mbuf *mbuf)
{
    unsigned int i, pkt_len;
    struct msghdr mhdr;
    struct rte_mbuf *m;
    struct iovec *iov;

    if (!mbuf)
        return 0;

    pkt_len = rte_pktmbuf_pkt_len(mbuf);
    if (!pkt_len)
        return 0;

    iov = usockp->usock_iovec;

    m = mbuf;
    for (i = 0; (m && (i < PKT0_MAX_IOV_LEN)); i++) {
        iov->iov_base = rte_pktmbuf_mtod(m, unsigned char *);
        iov->iov_len = rte_pktmbuf_data_len(m);
        m = m->next;
        iov++;
    }

    if ((i == PKT0_MAX_IOV_LEN) && m)
        usockp->usock_pkt_truncated++;

    mhdr.msg_name = NULL;
    mhdr.msg_namelen = 0;
    mhdr.msg_iov = usockp->usock_iovec;
    mhdr.msg_iovlen = i;
    mhdr.msg_control = NULL;
    mhdr.msg_controllen = 0;
    mhdr.msg_flags = 0;

#ifdef VR_DPDK_USOCK_DUMP
    RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d sending message\n", __func__,
            pthread_self(), usockp->usock_fd);
    rte_hexdump(stdout, "usock message dump:", &mhdr, sizeof(mhdr));
#endif
    return sendmsg(usockp->usock_fd, &mhdr, MSG_DONTWAIT);
}
Example #11
static inline int
app_pkt_handle(struct rte_mbuf *pkt, uint64_t time)
{
	uint8_t input_color, output_color;
	uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
	uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
	uint8_t flow_id = (uint8_t)(pkt_data[APP_PKT_FLOW_POS] & (APP_FLOWS_MAX - 1));
	input_color = pkt_data[APP_PKT_COLOR_POS];
	enum policer_action action;

	/* color input is not used for blind modes */
	output_color = (uint8_t) FUNC_METER(&app_flows[flow_id], time, pkt_len,
		(enum rte_meter_color) input_color);

	/* Apply policing and set the output color */
	action = policer_table[input_color][output_color];
	app_set_pkt_color(pkt_data, action);

	return action;
}
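policer_table[][], app_set_pkt_color() and the APP_* offsets are defined elsewhere in the application (this snippet is modelled on DPDK's qos_meter sample). A minimal sketch of what the function assumes; the concrete table values and the setter are assumptions.

#include <stdint.h>
#include <rte_meter.h>

enum policer_action {
	GREEN  = e_RTE_METER_GREEN,
	YELLOW = e_RTE_METER_YELLOW,
	RED    = e_RTE_METER_RED,
	DROP   = 3,
};

/* indexed by [input color][metered output color] */
static const enum policer_action
policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] = {
	{ GREEN, RED,    RED },		/* input GREEN  */
	{ DROP,  YELLOW, RED },		/* input YELLOW */
	{ DROP,  DROP,   RED },		/* input RED    */
};

static inline void
app_set_pkt_color(uint8_t *pkt_data, enum policer_action color)
{
	pkt_data[APP_PKT_COLOR_POS] = (uint8_t)color;
}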
Example #12
/*
 * Function sends unmatched packets to vswitchd.
 */
static void
send_packet_to_vswitchd(struct rte_mbuf *mbuf, struct dpdk_upcall *info)
{
	int rslt = 0;
	struct statistics *vswd_stat = NULL;
	void *mbuf_ptr = NULL;

	vswd_stat = &vport_stats[VSWITCHD];

	/* send one packet, delete information about segments */
	rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);

	/* allocate space before the packet for the upcall info */
	mbuf_ptr = rte_pktmbuf_prepend(mbuf, sizeof(*info));

	if (mbuf_ptr == NULL) {
		printf("Cannot prepend upcall info\n");
		rte_pktmbuf_free(mbuf);
		switch_tx_drop++;
		vswd_stat->tx_drop++;
		return;
	}

	rte_memcpy(mbuf_ptr, info, sizeof(*info));

	/* send the packet and the upcall info to the daemon */
	rslt = rte_ring_sp_enqueue(vswitch_packet_ring, mbuf);
	if (rslt < 0) {
		if (rslt == -ENOBUFS) {
			rte_pktmbuf_free(mbuf);
			switch_tx_drop++;
			vswd_stat->tx_drop++;
			return;
		} else {
			overruns++;
		}
	}

	vswd_stat->tx++;
}
Example #13
static uint32_t send_pkts(uint8_t port, struct rte_mempool* pool) {
	static uint64_t seq = 0;
	// alloc bufs
	struct rte_mbuf* bufs[BATCH_SIZE];
	uint32_t i;
	for (i = 0; i < BATCH_SIZE; i++) {
		struct rte_mbuf* buf = rte_pktmbuf_alloc(pool);
		rte_pktmbuf_data_len(buf) = 60;
		rte_pktmbuf_pkt_len(buf) = 60;
		bufs[i] = buf;
		// write seq number
		uint64_t* pkt = rte_pktmbuf_mtod(buf, uint64_t*);
		pkt[0] = seq++;
	}
	// send pkts
	uint32_t sent = 0;
	while (1) {
		sent += rte_eth_tx_burst(port, 0, bufs + sent, BATCH_SIZE - sent);
		if (sent >= BATCH_SIZE) {
			return sent;
		}
	}
}
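A matching receive path would read the 64-bit sequence number back from the start of each frame and check for gaps. A minimal sketch, reusing BATCH_SIZE from the example above; the port/queue numbers, the global counter and the gap handling are assumptions.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint64_t expected_seq = 0;

static void recv_and_check(uint8_t port) {
	struct rte_mbuf* bufs[BATCH_SIZE];
	uint16_t n = rte_eth_rx_burst(port, 0, bufs, BATCH_SIZE);
	uint16_t i;
	for (i = 0; i < n; i++) {
		// the sender wrote the sequence number into the first 8 bytes
		uint64_t* pkt = rte_pktmbuf_mtod(bufs[i], uint64_t*);
		if (pkt[0] != expected_seq) {
			printf("seq gap: got %" PRIu64 ", expected %" PRIu64 "\n",
					pkt[0], expected_seq);
		}
		expected_seq = pkt[0] + 1;
		rte_pktmbuf_free(bufs[i]);
	}
}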
Example #14
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
		       void *op_cookie,
		       enum qat_device_gen qat_dev_gen __rte_unused)
{
	struct rte_comp_op *op = in_op;
	struct qat_comp_op_cookie *cookie =
			(struct qat_comp_op_cookie *)op_cookie;
	struct qat_comp_xform *qat_xform = op->private_xform;
	const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
	struct icp_qat_fw_comp_req *comp_req =
	    (struct icp_qat_fw_comp_req *)out_msg;

	if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
		QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
				"operation requests, op (%p) is not a "
				"stateless operation.", op);
		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
		return -EINVAL;
	}

	rte_mov128(out_msg, tmpl);
	comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;

	/* common for sgl and flat buffers */
	comp_req->comp_pars.comp_len = op->src.length;
	comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
			op->dst.offset;

	if (op->m_src->next != NULL || op->m_dst->next != NULL) {
		/* sgl */
		int ret = 0;

		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_SGL);

		ret = qat_sgl_fill_array(op->m_src,
				op->src.offset,
				&cookie->qat_sgl_src,
				op->src.length,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

		ret = qat_sgl_fill_array(op->m_dst,
				op->dst.offset,
				&cookie->qat_sgl_dst,
				comp_req->comp_pars.out_buffer_sz,
				RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
		if (ret) {
			QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
			op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
			return ret;
		}

		comp_req->comn_mid.src_data_addr =
				cookie->qat_sgl_src_phys_addr;
		comp_req->comn_mid.dest_data_addr =
				cookie->qat_sgl_dst_phys_addr;
		comp_req->comn_mid.src_length = 0;
		comp_req->comn_mid.dst_length = 0;

	} else {
		/* flat aka linear buffer */
		ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
				QAT_COMN_PTR_TYPE_FLAT);
		comp_req->comn_mid.src_length = op->src.length;
		comp_req->comn_mid.dst_length =
				comp_req->comp_pars.out_buffer_sz;

		comp_req->comn_mid.src_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
		comp_req->comn_mid.dest_data_addr =
		    rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
	    qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
			    "decompression" : "compression");
	QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
		    sizeof(struct icp_qat_fw_comp_req));
#endif
	return 0;
}
Example #15
static uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
	uint64_t buff_addr = 0;
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST];
	uint32_t head_idx, packet_success = 0;
	uint16_t avail_idx, res_cur_idx;
	uint16_t res_base_idx, res_end_idx;
	uint16_t free_entries;
	uint8_t success = 0;

	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
	if (unlikely(queue_id != VIRTIO_RXQ)) {
		LOG_DEBUG(VHOST_DATA, "mq isn't supported in this version.\n");
		return 0;
	}

	vq = dev->virtqueue[VIRTIO_RXQ];
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;

	/*
	 * As many data cores may want access to available buffers,
	 * they need to be reserved.
	 */
	do {
		res_base_idx = vq->last_used_idx_res;
		avail_idx = *((volatile uint16_t *)&vq->avail->idx);

		free_entries = (avail_idx - res_base_idx);
		/*check that we have enough buffers*/
		if (unlikely(count > free_entries))
			count = free_entries;

		if (count == 0)
			return 0;

		res_end_idx = res_base_idx + count;
		/* vq->last_used_idx_res is atomically updated. */
		/* TODO: Allow to disable cmpset if no concurrency in application. */
		success = rte_atomic16_cmpset(&vq->last_used_idx_res,
				res_base_idx, res_end_idx);
	} while (unlikely(success == 0));
	res_cur_idx = res_base_idx;
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
			dev->device_fh, res_cur_idx, res_end_idx);

	/* Prefetch available ring to retrieve indexes. */
	rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) &
					(vq->size - 1)];

	/*Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (res_cur_idx != res_end_idx) {
		uint32_t offset = 0, vb_offset = 0;
		uint32_t pkt_len, len_to_cpy, data_len, total_copied = 0;
		uint8_t hdr = 0, uncompleted_pkt = 0;

		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];

		buff = pkts[packet_success];

		/* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
		buff_addr = gpa_to_vva(dev, desc->addr);
		/* Prefetch buffer address. */
		rte_prefetch0((void *)(uintptr_t)buff_addr);

		/* Copy virtio_hdr to packet and increment buffer address */
		buff_hdr_addr = buff_addr;

		/*
		 * If the descriptors are chained the header and data are
		 * placed in separate buffers.
		 */
		if ((desc->flags & VRING_DESC_F_NEXT) &&
			(desc->len == vq->vhost_hlen)) {
			desc = &vq->desc[desc->next];
			/* Buffer address translation. */
			buff_addr = gpa_to_vva(dev, desc->addr);
		} else {
			vb_offset += vq->vhost_hlen;
			hdr = 1;
		}

		pkt_len = rte_pktmbuf_pkt_len(buff);
		data_len = rte_pktmbuf_data_len(buff);
		len_to_cpy = RTE_MIN(data_len,
			hdr ? desc->len - vq->vhost_hlen : desc->len);
		while (total_copied < pkt_len) {
			/* Copy mbuf data to buffer */
			rte_memcpy((void *)(uintptr_t)(buff_addr + vb_offset),
				(const void *)(rte_pktmbuf_mtod(buff, const char *) + offset),
				len_to_cpy);
			PRINT_PACKET(dev, (uintptr_t)(buff_addr + vb_offset),
				len_to_cpy, 0);

			offset += len_to_cpy;
			vb_offset += len_to_cpy;
			total_copied += len_to_cpy;

			/* The whole packet completes */
			if (total_copied == pkt_len)
				break;

			/* The current segment completes */
			if (offset == data_len) {
				buff = buff->next;
				offset = 0;
				data_len = rte_pktmbuf_data_len(buff);
			}

			/* The current vring descriptor done */
			if (vb_offset == desc->len) {
				if (desc->flags & VRING_DESC_F_NEXT) {
					desc = &vq->desc[desc->next];
					buff_addr = gpa_to_vva(dev, desc->addr);
					vb_offset = 0;
				} else {
					/* Room in vring buffer is not enough */
					uncompleted_pkt = 1;
					break;
				}
			}
			len_to_cpy = RTE_MIN(data_len - offset, desc->len - vb_offset);
		};

		/* Update used ring with desc information */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id =
							head[packet_success];

		/* Drop the packet if it is uncompleted */
		if (unlikely(uncompleted_pkt == 1))
			vq->used->ring[res_cur_idx & (vq->size - 1)].len =
							vq->vhost_hlen;
		else
			vq->used->ring[res_cur_idx & (vq->size - 1)].len =
							pkt_len + vq->vhost_hlen;

		res_cur_idx++;
		packet_success++;

		if (unlikely(uncompleted_pkt == 1))
			continue;

		rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
			(const void *)&virtio_hdr, vq->vhost_hlen);

		PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);

		if (res_cur_idx < res_end_idx) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);
		}
	}

	rte_compiler_barrier();

	/* Wait until it's our turn to add our buffer to the used ring. */
	while (unlikely(vq->last_used_idx != res_base_idx))
		rte_pause();

	*(volatile uint16_t *)&vq->used->idx += count;
	vq->last_used_idx = res_end_idx;

	/* flush used->idx update before we read avail->flags. */
	rte_mb();

	/* Kick the guest if necessary. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->callfd, 1);
	return count;
}
Example #16
uint16_t
fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	union fm10k_rx_desc desc;
	struct fm10k_rx_queue *q = rx_queue;
	uint16_t count = 0;
	int alloc = 0;
	uint16_t next_dd;
	int ret;

	next_dd = q->next_dd;

	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
	for (count = 0; count < nb_pkts; ++count) {
		mbuf = q->sw_ring[next_dd];
		desc = q->hw_ring[next_dd];
		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
			break;
#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
		dump_rxd(&desc);
#endif
		rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
		rte_pktmbuf_data_len(mbuf) = desc.w.length;

		mbuf->ol_flags = 0;
#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
		rx_desc_to_ol_flags(mbuf, &desc);
#endif

		mbuf->hash.rss = desc.d.rss;

		rx_pkts[count] = mbuf;
		if (++next_dd == q->nb_desc) {
			next_dd = 0;
			alloc = 1;
		}

		/* Prefetch next mbuf while processing current one. */
		rte_prefetch0(q->sw_ring[next_dd]);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((next_dd & 0x3) == 0) {
			rte_prefetch0(&q->hw_ring[next_dd]);
			rte_prefetch0(&q->sw_ring[next_dd]);
		}
	}

	q->next_dd = next_dd;

	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
		ret = rte_mempool_get_bulk(q->mp,
					(void **)&q->sw_ring[q->next_alloc],
					q->alloc_thresh);

		if (unlikely(ret != 0)) {
			uint8_t port = q->port_id;
			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
			/*
			 * Need to restore next_dd if we cannot allocate new
			 * buffers to replenish the old ones.
			 */
			q->next_dd = (q->next_dd + q->nb_desc - count) %
								q->nb_desc;
			rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
			return 0;
		}

		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
			mbuf = q->sw_ring[q->next_alloc];

			/* setup static mbuf fields */
			fm10k_pktmbuf_reset(mbuf, q->port_id);

			/* write descriptor */
			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
			q->hw_ring[q->next_alloc] = desc;
		}
		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
		q->next_trigger += q->alloc_thresh;
		if (q->next_trigger >= q->nb_desc) {
			q->next_trigger = q->alloc_thresh - 1;
			q->next_alloc = 0;
		}
	}

	return count;
}
Example #17
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	Vmxnet3_TxDesc *txd = NULL;
	vmxnet3_buf_info_t *tbi = NULL;
	struct vmxnet3_hw *hw;
	struct rte_mbuf *txm;
	vmxnet3_tx_queue_t *txq = tx_queue;

	hw = txq->hw;

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {

		if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
			int copy_size = 0;

			txm = tx_pkts[nb_tx];
			/* Don't support scatter packets yet, free them if met */
			if (txm->nb_segs != 1) {
				if (vmxnet3_xmit_convert_callback) {
					txm = vmxnet3_xmit_convert_callback(txm);
				} else {
					txq->stats.drop_total++;
					nb_tx++;
					rte_pktmbuf_free(txm);
					continue;
				}
			}

			if (!txm) {
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			/* Needs to minus ether header len */
			if (txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
				PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU");
				rte_pktmbuf_free(txm);
				txq->stats.drop_total++;
				nb_tx++;
				continue;
			}

			txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
			if (rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
				struct Vmxnet3_TxDataDesc *tdd;

				tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
				copy_size = rte_pktmbuf_pkt_len(txm);
				rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
			}

			/* Fill the tx descriptor */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
			tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
			if (copy_size)
				txd->addr = rte_cpu_to_le_64(txq->data_ring.basePA +
							txq->cmd_ring.next2fill *
							sizeof(struct Vmxnet3_TxDataDesc));
			else
				txd->addr = tbi->bufPA;
			txd->len = txm->data_len;

			/* Mark the last descriptor as End of Packet. */
			txd->cq = 1;
			txd->eop = 1;

			/* Add VLAN tag if requested */
			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
				txd->ti = 1;
				txd->tci = rte_cpu_to_le_16(txm->vlan_tci);
			}

			/* Record current mbuf for freeing it later in tx complete */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
			VMXNET3_ASSERT(txm);
#endif
			tbi->m = txm;

			/* Set the offloading mode to default */
			txd->hlen = 0;
			txd->om = VMXNET3_OM_NONE;
			txd->msscof = 0;

			/* finally flip the GEN bit of the SOP desc  */
			txd->gen = txq->cmd_ring.gen;
			txq->shared->ctrl.txNumDeferred++;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
			nb_tx++;

		} else {
Example #18
/*
 * test data manipulation in mbuf
 */
static int
test_one_pktmbuf(void)
{
	struct rte_mbuf *m = NULL;
	char *data, *data2, *hdr;
	unsigned i;

	printf("Test pktmbuf API\n");

	/* alloc a mbuf */

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_dump(m, 0);

	/* append data */

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0x66, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);
	rte_pktmbuf_dump(m, 2*MBUF_TEST_DATA_LEN);

	/* this append should fail */

	data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
	if (data2 != NULL)
		GOTO_FAIL("Append should not succeed");

	/* append some more data */

	data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data2 == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* trim data at the end of mbuf */

	if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
		GOTO_FAIL("Cannot trim data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this trim should fail */

	if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
		GOTO_FAIL("trim should not succeed");

	/* prepend one header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);

	/* prepend another header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);

	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
	rte_pktmbuf_dump(m, 0);

	/* this prepend should fail */

	hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
	if (hdr != NULL)
		GOTO_FAIL("prepend should not succeed");

	/* remove data at beginning of mbuf (adj) */

	if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
		GOTO_FAIL("rte_pktmbuf_adj failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this adj should fail */

	if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
		GOTO_FAIL("rte_pktmbuf_adj should not succeed");

	/* check data */

	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
		if (data[i] != 0x66)
			GOTO_FAIL("Data corrupted at offset %u", i);
	}

	/* free mbuf */

	rte_pktmbuf_free(m);
	m = NULL;
	return 0;

fail:
	if (m)
		rte_pktmbuf_free(m);
	return -1;
}
Example #19
static uint32_t
dpdk_virtio_dev_to_vm_tx_burst(struct dpdk_virtio_writer *p,
        vr_dpdk_virtioq_t *vq, struct rte_mbuf **pkts, uint32_t count)
{
    struct vring_desc *desc;
    struct rte_mbuf *buff;
    /* The virtio_hdr is initialised to 0. */
    struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
    uint64_t buff_addr = 0;
    uint64_t buff_hdr_addr = 0;
    uint32_t head[VR_DPDK_VIRTIO_TX_BURST_SZ];
    uint32_t head_idx, packet_success = 0;
    uint16_t avail_idx, res_cur_idx;
    uint16_t res_base_idx, res_end_idx;
    uint16_t free_entries;
    uint8_t success = 0;
    vr_uvh_client_t *vru_cl;

    if (unlikely(vq->vdv_ready_state == VQ_NOT_READY))
        return 0;

    vru_cl = vr_dpdk_virtio_get_vif_client(vq->vdv_vif_idx);
    if (unlikely(vru_cl == NULL))
        return 0;

    /*
     * As many data cores may want access to available buffers,
     * they need to be reserved.
     */
    do {
        res_base_idx = vq->vdv_last_used_idx_res;
        avail_idx = *((volatile uint16_t *)&vq->vdv_avail->idx);

        free_entries = (avail_idx - res_base_idx);
        /*check that we have enough buffers*/
        if (unlikely(count > free_entries))
            count = free_entries;

        if (unlikely(count == 0))
            return 0;

        res_end_idx = res_base_idx + count;
        /* vq->vdv_last_used_idx_res is atomically updated. */
        /* TODO: Allow to disable cmpset if no concurrency in application. */
        success = rte_atomic16_cmpset(&vq->vdv_last_used_idx_res,
                res_base_idx, res_end_idx);
    } while (unlikely(success == 0));
    res_cur_idx = res_base_idx;
    RTE_LOG(DEBUG, VROUTER, "%s: Current Index %d| End Index %d\n",
            __func__, res_cur_idx, res_end_idx);

    /* Prefetch available ring to retrieve indexes. */
    rte_prefetch0(&vq->vdv_avail->ring[res_cur_idx & (vq->vdv_size - 1)]);

    /* Retrieve all of the head indexes first to avoid caching issues. */
    for (head_idx = 0; head_idx < count; head_idx++)
        head[head_idx] = vq->vdv_avail->ring[(res_cur_idx + head_idx) &
                    (vq->vdv_size - 1)];

    /* Prefetch descriptor index. */
    rte_prefetch0(&vq->vdv_desc[head[packet_success]]);

    while (res_cur_idx != res_end_idx) {
        uint32_t offset = 0, vb_offset = 0;
        uint32_t pkt_len, len_to_cpy, data_len, total_copied = 0;
        uint8_t hdr = 0, uncompleted_pkt = 0;

        /* Get descriptor from available ring */
        desc = &vq->vdv_desc[head[packet_success]];

        buff = pkts[packet_success];

        /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
        buff_addr = (uintptr_t)vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
        /* Prefetch buffer address. */
        rte_prefetch0((void *)(uintptr_t)buff_addr);

        /* Copy virtio_hdr to packet and increment buffer address */
        buff_hdr_addr = buff_addr;

        /*
         * If the descriptors are chained the header and data are
         * placed in separate buffers.
         */
        if (likely(desc->flags & VRING_DESC_F_NEXT)
            && (desc->len == sizeof(struct virtio_net_hdr))) {
            /*
             * TODO: verify that desc->next is sane below.
             */
            desc = &vq->vdv_desc[desc->next];
            /* Buffer address translation. */
            buff_addr = (uintptr_t)vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
        } else {
            vb_offset += sizeof(struct virtio_net_hdr);
            hdr = 1;
        }

        pkt_len = rte_pktmbuf_pkt_len(buff);
        data_len = rte_pktmbuf_data_len(buff);
        len_to_cpy = RTE_MIN(data_len,
            hdr ? desc->len - sizeof(struct virtio_net_hdr) : desc->len);
        while (total_copied < pkt_len) {
            /* Copy mbuf data to buffer */
            rte_memcpy((void *)(uintptr_t)(buff_addr + vb_offset),
                rte_pktmbuf_mtod_offset(buff, const void *, offset),
                len_to_cpy);

            offset += len_to_cpy;
            vb_offset += len_to_cpy;
            total_copied += len_to_cpy;

            /* The whole packet completes */
            if (likely(total_copied == pkt_len))
                break;

            /* The current segment completes */
            if (offset == data_len) {
                buff = buff->next;
                offset = 0;
                data_len = rte_pktmbuf_data_len(buff);
            }

            /* The current vring descriptor done */
            if (vb_offset == desc->len) {
                if (desc->flags & VRING_DESC_F_NEXT) {
                    desc = &vq->vdv_desc[desc->next];
                    buff_addr = (uintptr_t)vr_dpdk_guest_phys_to_host_virt(vru_cl, desc->addr);
                    vb_offset = 0;
                } else {
                    /* Room in vring buffer is not enough */
                    uncompleted_pkt = 1;
                    break;
                }
            }
            len_to_cpy = RTE_MIN(data_len - offset, desc->len - vb_offset);
        };

        /* Update used ring with desc information */
        vq->vdv_used->ring[res_cur_idx & (vq->vdv_size - 1)].id =
                            head[packet_success];

        /* Drop the packet if it is uncompleted */
        if (unlikely(uncompleted_pkt == 1))
            vq->vdv_used->ring[res_cur_idx & (vq->vdv_size - 1)].len =
                            sizeof(struct virtio_net_hdr);
        else
            vq->vdv_used->ring[res_cur_idx & (vq->vdv_size - 1)].len =
                            pkt_len + sizeof(struct virtio_net_hdr);

        res_cur_idx++;
        packet_success++;

        /* TODO: in DPDK 2.1 we do not copy the header
        if (unlikely(uncompleted_pkt == 1))
            continue;
        */
        rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
            (const void *)&virtio_hdr, sizeof(struct virtio_net_hdr));

        if (likely(res_cur_idx < res_end_idx)) {
            /* Prefetch descriptor index. */
            rte_prefetch0(&vq->vdv_desc[head[packet_success]]);
        }
    }

    rte_compiler_barrier();

    /* Wait until it's our turn to add our buffer to the used ring. */
    while (unlikely(vq->vdv_last_used_idx != res_base_idx))
        rte_pause();

    *(volatile uint16_t *)&vq->vdv_used->idx += count;
    vq->vdv_last_used_idx = res_end_idx;
    RTE_LOG(DEBUG, VROUTER, "%s: vif %d vq %p last_used_idx %d used->idx %d\n",
            __func__, vq->vdv_vif_idx, vq, vq->vdv_last_used_idx, vq->vdv_used->idx);

    /* flush used->idx update before we read avail->flags. */
    rte_mb();

    /* Kick the guest if necessary. */
    if (unlikely(!(vq->vdv_avail->flags & VRING_AVAIL_F_NO_INTERRUPT))) {
        p->nb_syscalls++;
        eventfd_write(vq->vdv_callfd, 1);
    }
    return count;
}
Example #20
int do_nf(void *useless)
{
	(void) useless; //XXX: this line suppresses the "unused-parameter" error

	int i;
	unsigned int p;
	mbuf_array_t pkts_received;

	//Init the regex engine
	if(!initializeRegEx(&re_bbc, re_extra_bbc,BBC))
		return 0;

	mbuf_array_t *pkts_to_send = (mbuf_array_t*)malloc(NUM_PORTS * sizeof(mbuf_array_t));
	for(p = 0; p < NUM_PORTS; p++)
		pkts_to_send[p].n_mbufs = 0;

	while(1)
	{
#ifdef ENABLE_SEMAPHORE
		sem_wait(nf_params.semaphore);
#endif

		/*0) Iterates on all the ports */
		for(p = 0; p < NUM_PORTS; p++)
		{
			/*1) Receive incoming packets */

			pkts_received.n_mbufs = rte_ring_sc_dequeue_burst(nf_params.ports[p].to_nf_queue,(void **)&pkts_received.array[0],PKT_TO_NF_THRESHOLD);

			if(likely(pkts_received.n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Received %d pkts on port %d (%s)\n", NAME, pkts_received.n_mbufs,p,nf_params.ports[p].name);
#endif

				for (i=0;i < pkts_received.n_mbufs;i++)
				{
					/*2) Operate on the packet */

					unsigned char *pkt = rte_pktmbuf_mtod(pkts_received.array[i],unsigned char *);
#ifdef ENABLE_LOG
					fprintf(logFile,"[%s] Packet size: %d\n",NAME,rte_pktmbuf_pkt_len(pkts_received.array[i]));
					fprintf(logFile,"[%s] %.2x:%.2x:%.2x:%.2x:%.2x:%.2x -> %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n",NAME,pkt[6],pkt[7],pkt[8],pkt[9],pkt[10],pkt[11],pkt[0],pkt[1],pkt[2],pkt[3],pkt[4],pkt[5]);
#endif

					/**
					*	If the packet arrives from the first port, check if it must be dropped
					*/
					if(p == 0)
					{
#ifdef ENABLE_LOG
						fprintf(logFile,"[%s] I'm going to check if the packet must be dropped.\n", NAME);
#endif
						if(drop(pkt,rte_pktmbuf_pkt_len(pkts_received.array[i])))
						{
							//The packet must be dropped
#ifdef ENABLE_LOG
							fprintf(logFile,"[%s] The packet is dropped.\n", NAME);
#endif
							rte_pktmbuf_free(pkts_received.array[i]);
							continue;
						}
					}
					unsigned int output_port = (p+1) % NUM_PORTS;

					pkts_to_send[output_port].array[pkts_to_send[output_port].n_mbufs] = pkts_received.array[i];
					pkts_to_send[output_port].n_mbufs++;
				}//end of iteration on the packets received from the current port
			} //end if(likely(pkts_received.n_mbufs > 0))
		}//end iteration on the ports

		/*3) Send the processed packet not transmitted yet*/
		for(p = 0; p < NUM_PORTS; p++)
		{
			if(likely(pkts_to_send[p].n_mbufs > 0))
			{
#ifdef ENABLE_LOG
				fprintf(logFile,"[%s] Sending %d packets on port %x (%s).\n", NAME,pkts_to_send[p].n_mbufs,p,nf_params.ports[p].name);
#endif
				int ret = rte_ring_sp_enqueue_burst(nf_params.ports[p].to_xdpd_queue,(void *const*)pkts_to_send[p].array,(unsigned)pkts_to_send[p].n_mbufs);

				if (unlikely(ret < pkts_to_send[p].n_mbufs))
				{
					fprintf(logFile,"[%s] Not enough room in port %d towards xDPD to enqueue; the packet will be dropped.\n", NAME,p);
					do {
						struct rte_mbuf *pkt_to_free = pkts_to_send[p].array[ret];
						rte_pktmbuf_free(pkt_to_free);
					} while (++ret < pkts_to_send[p].n_mbufs);
				}
			}
			pkts_to_send[p].n_mbufs = 0;
		}/* End of iteration on the ports */

	}/*End of while true*/
}
Example #21
void app_main_loop_rx_flow(void)
{
	const unsigned lcore_id = rte_lcore_id();
	struct rte_mbuf *bufs[RX_BURST_SIZE];
	struct rte_mbuf *buf;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	struct tcp_hdr *tcp_hdr;
	struct udp_hdr *udp_hdr;
	struct pkt_info pktinfo;
	int32_t ret;
	uint16_t i, n_rx, queueid;
	uint8_t port;

	port = 0;
	queueid = (uint16_t) app.lcore_conf[lcore_id].queue_id;
	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table Ready\n", lcore_id);

	while (!app_quit_signal) {

		n_rx = rte_eth_rx_burst(port, queueid, bufs, RX_BURST_SIZE);
		if (unlikely(n_rx == 0)) {
			port++;
			if (port >= app.n_ports)
				port = 0;
			continue;
		}
		app_stat[queueid].rx_count += n_rx;

		for (i = 0; i < n_rx; i++) {
			buf = bufs[i];

			pktinfo.timestamp = rte_rdtsc();
			pktinfo.pktlen = rte_pktmbuf_pkt_len(buf);

			eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

			/* strip vlan_hdr */
			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
				/* struct vlan_hdr *vh = (struct vlan_hdr *) &eth_hdr[1]; */
				/* buf->ol_flags |= PKT_RX_VLAN_PKT; */
				/* buf->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); */
				/* memmove(rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr)), */
				/* 		eth_hdr, 2 * ETHER_ADDR_LEN); */
				/* eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); */
				eth_hdr = (struct ether_hdr *) rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr));
			}

			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				/* IPv4 */
				pktinfo.type = PKT_IP_TYPE_IPV4;
				ipv4_hdr = (struct ipv4_hdr *) &eth_hdr[1];

				pktinfo.key.v4.src_ip = rte_be_to_cpu_32(ipv4_hdr->src_addr);
				pktinfo.key.v4.dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
				pktinfo.key.v4.proto = ipv4_hdr->next_proto_id;

				switch (ipv4_hdr->next_proto_id) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v4.src_port = 0;
						pktinfo.key.v4.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v4 */
				ret = update_flow_entry(app.flow_table_v4[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v4_count++;
				else
					app_stat[queueid].miss_updated_tbl_v4_count++;

			} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
				/* IPv6 */
				pktinfo.type = PKT_IP_TYPE_IPV6;
				ipv6_hdr = (struct ipv6_hdr *) &eth_hdr[1];

				rte_memcpy(pktinfo.key.v6.src_ip, ipv6_hdr->src_addr, 16);
				rte_memcpy(pktinfo.key.v6.dst_ip, ipv6_hdr->dst_addr, 16);
				pktinfo.key.v6.proto = ipv6_hdr->proto;

				switch (ipv6_hdr->proto) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v6.src_port = 0;
						pktinfo.key.v6.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v6 */
				ret = update_flow_entry(app.flow_table_v6[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v6_count++;
				else
					app_stat[queueid].miss_updated_tbl_v6_count++;

			} else {
				/* others */
				app_stat[queueid].unknown_pkt_count++;
				rte_pktmbuf_free(buf);
				continue;
			}
		}

		port++;
		if (port >= app.n_ports)
			port = 0;
	}

	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table finished\n", lcore_id);
}
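struct pkt_info and its v4/v6 key layout are defined elsewhere in the application. A minimal sketch that is consistent with the fields the loop fills in; the exact field widths and ordering are assumptions.

#include <stdint.h>

enum pkt_ip_type {
	PKT_IP_TYPE_IPV4,
	PKT_IP_TYPE_IPV6,
};

struct pkt_info {
	uint64_t timestamp;		/* rte_rdtsc() at reception */
	uint32_t pktlen;		/* rte_pktmbuf_pkt_len() of the frame */
	enum pkt_ip_type type;
	union {
		struct {
			uint32_t src_ip;
			uint32_t dst_ip;
			uint16_t src_port;
			uint16_t dst_port;
			uint8_t  proto;
		} v4;
		struct {
			uint8_t  src_ip[16];
			uint8_t  dst_ip[16];
			uint16_t src_port;
			uint16_t dst_port;
			uint8_t  proto;
		} v6;
	} key;
};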