Example #1
rofl_result_t pop_datapacket_offset(datapacket_dpdk_t *dpkt, unsigned int offset, unsigned int num_of_bytes){
	
	uint8_t *src_ptr = get_buffer_dpdk(dpkt);
	uint8_t *dst_ptr = (uint8_t*)rte_pktmbuf_adj(dpkt->mbuf, num_of_bytes);
	//NOTE dst_ptr = src_ptr + num_of_bytes
	
	if( NULL==dst_ptr )
		return ROFL_FAILURE;

	if(false==rte_pktmbuf_is_contiguous(dpkt->mbuf)){
		assert(0);
		return ROFL_FAILURE;
	}
	
	// shift the leading `offset` bytes toward the tail, over the popped region
	memmove(dst_ptr, src_ptr, offset);
	
#ifndef NDEBUG
	// set now unused bytes to 0x00 for easier debugging
	memset(src_ptr, 0x00, num_of_bytes);
#endif

	dpkt->clas_state.base = get_buffer_dpdk(dpkt);
	dpkt->clas_state.len = get_buffer_length_dpdk(dpkt);

	return ROFL_SUCCESS;
}
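The function above leans on the core contract of rte_pktmbuf_adj(): it advances the data start of the first segment by `len` bytes and returns the new start, or NULL when `len` exceeds that segment's data length. A minimal sketch of that contract (standalone, not taken from the project above):

#include <rte_mbuf.h>

/* Sketch: strip `len` leading bytes. On success, pkt_len and data_len
 * shrink by `len` and the headroom grows by the same amount. */
static int strip_leading_bytes(struct rte_mbuf *m, uint16_t len)
{
	if (rte_pktmbuf_adj(m, len) == NULL)
		return -1;	/* len > data_len: the mbuf is left untouched */
	return 0;
}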
Example #2
/*
 * Upper layer processing for a received Ethernet packet.
 */
void
ether_demux(struct ifnet *ifp, struct rte_mbuf *m)
{
	struct ether_hdr *eh;
	int i, isr;
	u_short ether_type;

	//KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));

	eh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	ether_type = rte_be_to_cpu_16(eh->ether_type);

	rte_pktmbuf_adj(m, ETHER_HDR_LEN);

	/*
	 * Dispatch frame to upper layer.
	 */
	switch (ether_type) {
	case ETHER_TYPE_IPv4:
		isr = NETISR_IP;
		break;

	case ETHER_TYPE_ARP:
		isr = NETISR_ARP;
		break;
#ifdef INET6
	case ETHER_TYPE_IPv6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	/*
	 * No upper-layer handler claimed the frame; dispose of it.
	 */
	rte_pktmbuf_free(m);
}
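Note the ordering in ether_demux(): ether_type is read before the header is stripped, because after rte_pktmbuf_adj() the old header bytes are headroom and must no longer be treated as packet data. A condensed sketch of the same decap step, assuming the pre-19.x DPDK names used throughout these examples and a frame that holds at least a full Ethernet header:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_byteorder.h>

/* Sketch: return the host-order EtherType and leave the mbuf
 * pointing at the L3 header. */
static uint16_t decap_ethernet(struct rte_mbuf *m)
{
	struct ether_hdr *eh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	uint16_t ether_type = rte_be_to_cpu_16(eh->ether_type);

	rte_pktmbuf_adj(m, ETHER_HDR_LEN);
	return ether_type;
}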
Example #3
/*
 * Parse message from vswitchd and send to appropriate handler
 */
static void
handle_vswitchd_cmd(struct rte_mbuf *mbuf)
{
	struct dpdk_message *request = NULL;

	request = rte_pktmbuf_mtod(mbuf, struct dpdk_message *);

	switch (request->type) {
	case FLOW_CMD_FAMILY:
		handle_flow_cmd(&request->flow_msg);
		rte_pktmbuf_free(mbuf);
		break;
	case PACKET_CMD_FAMILY:
		rte_pktmbuf_adj(mbuf, sizeof(*request));
		handle_packet_cmd(&request->packet_msg, mbuf);
		break;
	default:
		handle_unknown_cmd();
		rte_pktmbuf_free(mbuf);
	}
}
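The PACKET_CMD_FAMILY branch dereferences request after the adj, which looks suspicious but is safe: rte_pktmbuf_adj() only moves the data offset, it neither moves nor clears bytes, so a pointer taken beforehand still references valid buffer memory (now headroom). A sketch of that property, with an illustrative msg_hdr type that is not part of the code above:

#include <rte_mbuf.h>

struct msg_hdr { uint32_t type; };	/* illustrative only */

static uint32_t strip_header_keep_pointer(struct rte_mbuf *m)
{
	struct msg_hdr *hdr = rte_pktmbuf_mtod(m, struct msg_hdr *);

	rte_pktmbuf_adj(m, (uint16_t)sizeof(*hdr));

	/* hdr now points into headroom: still valid memory, though any
	 * later prepend would overwrite it */
	return hdr->type;
}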
Example #4
/*
 * Reassemble fragments into one packet.
 */
struct rte_mbuf *
ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
{
	struct ipv6_hdr *ip_hdr;
	struct ipv6_extension_fragment *frag_hdr;
	struct rte_mbuf *m, *prev;
	uint32_t i, n, ofs, first_len;
	uint32_t last_len, move_len, payload_len;

	first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
	n = fp->last_idx - 1;

	/* start from the last fragment. */
	m = fp->frags[IP_LAST_FRAG_IDX].mb;
	ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
	last_len = fp->frags[IP_LAST_FRAG_IDX].len;

	payload_len = ofs + last_len;

	while (ofs != first_len) {

		prev = m;

		for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {

			/* previous fragment found. */
			if (fp->frags[i].ofs + fp->frags[i].len == ofs) {

				ip_frag_chain(fp->frags[i].mb, m);

				/* update our last fragment and offset. */
				m = fp->frags[i].mb;
				ofs = fp->frags[i].ofs;
			}
		}

		/* error - hole in the packet. */
		if (m == prev) {
			return NULL;
		}
	}

	/* chain with the first fragment. */
	ip_frag_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
	m = fp->frags[IP_FIRST_FRAG_IDX].mb;

	/* update mbuf fields for reassembled packet. */
	m->ol_flags |= PKT_TX_IP_CKSUM;

	/* update ipv6 header for the reassembled datagram */
	ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, m->l2_len);

	ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);

	/*
	 * remove fragmentation header. note that per RFC2460, we need to update
	 * the last non-fragmentable header with the "next header" field to contain
	 * type of the first fragmentable header, but we currently don't support
	 * other headers, so we assume there are no other headers and thus update
	 * the main IPv6 header instead.
	 */
	move_len = m->l2_len + m->l3_len - sizeof(*frag_hdr);
	frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
	ip_hdr->proto = frag_hdr->next_header;

	ip_frag_memmove(rte_pktmbuf_mtod_offset(m, char *, sizeof(*frag_hdr)),
			rte_pktmbuf_mtod(m, char*), move_len);

	rte_pktmbuf_adj(m, sizeof(*frag_hdr));

	return m;
}
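The tail of ipv6_frag_reassemble() shows the standard idiom for deleting a header from the middle of a packet: shift everything in front of it toward the tail by the size of the doomed header, then adj away the duplicated leading bytes. A generic sketch of the idiom (parameter names are mine, not DPDK's):

#include <string.h>
#include <rte_mbuf.h>

/* Sketch: delete del_len bytes that sit lead_len bytes into the packet. */
static int delete_inner_header(struct rte_mbuf *m, uint16_t lead_len,
			       uint16_t del_len)
{
	char *data = rte_pktmbuf_mtod(m, char *);

	if ((uint32_t)lead_len + del_len > rte_pktmbuf_data_len(m))
		return -1;

	memmove(data + del_len, data, lead_len);	/* regions overlap; memmove is required */
	return rte_pktmbuf_adj(m, del_len) != NULL ? 0 : -1;
}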
Example #5
/*
 * Reassemble fragments into one packet.
 */
struct rte_mbuf *
ipv4_frag_reassemble(struct ip_frag_pkt *fp)
{
	struct ipv4_hdr *ip_hdr;
	struct rte_mbuf *m, *prev;
	uint32_t i, n, ofs, first_len;
	uint32_t curr_idx = 0;

	first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
	n = fp->last_idx - 1;

	/* start from the last fragment. */
	m = fp->frags[IP_LAST_FRAG_IDX].mb;
	ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
	curr_idx = IP_LAST_FRAG_IDX;

	while (ofs != first_len) {

		prev = m;

		for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {

			/* previous fragment found. */
			if (fp->frags[i].ofs + fp->frags[i].len == ofs) {

				/* adjust start of the last fragment data. */
				rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
				rte_pktmbuf_chain(fp->frags[i].mb, m);

				/* this mbuf should not be accessed directly */
				fp->frags[curr_idx].mb = NULL;
				curr_idx = i;

				/* update our last fragment and offset. */
				m = fp->frags[i].mb;
				ofs = fp->frags[i].ofs;
			}
		}

		/* error - hole in the packet. */
		if (m == prev) {
			return NULL;
		}
	}

	/* chain with the first fragment. */
	rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
	rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
	m = fp->frags[IP_FIRST_FRAG_IDX].mb;

	/* update mbuf fields for reassembled packet. */
	m->ol_flags |= PKT_TX_IP_CKSUM;

	/* update ipv4 header for the reassembled packet */
	ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);

	ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
		m->l3_len));
	ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
		rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
	ip_hdr->hdr_checksum = 0;

	return m;
}
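Unlike the IPv6 variant, this version strips each later fragment's own L2/L3 headers with rte_pktmbuf_adj() before chaining, so only payload bytes end up linked behind the first fragment. A sketch of that strip-and-chain step; note that rte_pktmbuf_chain() returns -EOVERFLOW when the segment limit would be exceeded, which the example above does not check:

#include <rte_mbuf.h>

static int chain_fragment(struct rte_mbuf *head, struct rte_mbuf *frag)
{
	uint16_t hdr_len = (uint16_t)(frag->l2_len + frag->l3_len);

	if (rte_pktmbuf_adj(frag, hdr_len) == NULL)
		return -1;
	return rte_pktmbuf_chain(head, frag);	/* 0 on success */
}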
Example #6
char *rte_pktmbuf_adj_export(struct rte_mbuf *m, uint16_t len) {
	return rte_pktmbuf_adj(m, len);
}
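This wrapper exists because rte_pktmbuf_adj() is a static inline in rte_mbuf.h and therefore emits no linkable symbol; wrapping it in a real function is the usual way to expose it to dlsym() or a foreign-function binding. The consumer side might declare it like this (hypothetical, not part of the source above):

#include <stdint.h>

struct rte_mbuf;	/* opaque to the consumer */
char *rte_pktmbuf_adj_export(struct rte_mbuf *m, uint16_t len);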
Example #7
/*
 * test data manipulation in mbuf
 */
static int
test_one_pktmbuf(void)
{
	struct rte_mbuf *m = NULL;
	char *data, *data2, *hdr;
	unsigned i;

	printf("Test pktmbuf API\n");

	/* alloc a mbuf */

	m = rte_pktmbuf_alloc(pktmbuf_pool);
	if (m == NULL)
		GOTO_FAIL("Cannot allocate mbuf");
	if (rte_pktmbuf_pkt_len(m) != 0)
		GOTO_FAIL("Bad length");

	rte_pktmbuf_dump(m, 0);

	/* append data */

	data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
	if (data == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	memset(data, 0x66, rte_pktmbuf_pkt_len(m));
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);
	rte_pktmbuf_dump(m, 2*MBUF_TEST_DATA_LEN);

	/* this append should fail */

	data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
	if (data2 != NULL)
		GOTO_FAIL("Append should not succeed");

	/* append some more data */

	data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
	if (data2 == NULL)
		GOTO_FAIL("Cannot append data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* trim data at the end of mbuf */

	if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
		GOTO_FAIL("Cannot trim data");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this trim should fail */

	if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
		GOTO_FAIL("trim should not succeed");

	/* prepend one header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);

	/* prepend another header */

	hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
	if (hdr == NULL)
		GOTO_FAIL("Cannot prepend");
	if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Prepend failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");
	memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);

	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
	rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
	rte_pktmbuf_dump(m, 0);

	/* this prepend should fail */

	hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
	if (hdr != NULL)
		GOTO_FAIL("prepend should not succeed");

	/* remove data at beginning of mbuf (adj) */

	if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
		GOTO_FAIL("rte_pktmbuf_adj failed");
	if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad pkt length");
	if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
		GOTO_FAIL("Bad data length");
	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	/* this adj should fail */

	if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
		GOTO_FAIL("rte_pktmbuf_adj should not succeed");

	/* check data */

	if (!rte_pktmbuf_is_contiguous(m))
		GOTO_FAIL("Buffer should be continuous");

	for (i = 0; i < MBUF_TEST_DATA_LEN; i++) {
		if (data[i] != 0x66)
			GOTO_FAIL("Data corrupted at offset %u", i);
	}

	/* free mbuf */

	rte_pktmbuf_free(m);
	m = NULL;
	return 0;

fail:
	if (m)
		rte_pktmbuf_free(m);
	return -1;
}
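The adj-related assertions in this test reduce to a few invariants on a single-segment mbuf. A compact sketch of them, assuming a valid mempool `pool` and RTE_VERIFY() from rte_debug.h:

#include <rte_mbuf.h>
#include <rte_debug.h>

static void adj_invariants(struct rte_mempool *pool)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pool);

	if (m == NULL || rte_pktmbuf_append(m, 64) == NULL) {
		rte_pktmbuf_free(m);	/* free(NULL) is a no-op */
		return;
	}

	uint16_t headroom = rte_pktmbuf_headroom(m);

	rte_pktmbuf_adj(m, 16);
	RTE_VERIFY(rte_pktmbuf_headroom(m) == headroom + 16);	/* headroom grew */
	RTE_VERIFY(rte_pktmbuf_data_len(m) == 48);		/* data shrank */
	RTE_VERIFY(rte_pktmbuf_pkt_len(m) == 48);

	rte_pktmbuf_free(m);
}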
Example #8
void app_main_loop_rx_flow(void)
{
	const unsigned lcore_id = rte_lcore_id();
	struct rte_mbuf *bufs[RX_BURST_SIZE];
	struct rte_mbuf *buf;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	struct tcp_hdr *tcp_hdr;
	struct udp_hdr *udp_hdr;
	struct pkt_info pktinfo;
	int32_t ret;
	uint16_t i, n_rx, queueid;
	uint8_t port;

	port = 0;
	queueid = (uint16_t) app.lcore_conf[lcore_id].queue_id;
	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table Ready\n", lcore_id);

	while (!app_quit_signal) {

		n_rx = rte_eth_rx_burst(port, queueid, bufs, RX_BURST_SIZE);
		if (unlikely(n_rx == 0)) {
			port++;
			if (port >= app.n_ports)
				port = 0;
			continue;
		}
		app_stat[queueid].rx_count += n_rx;

		for (i = 0; i < n_rx; i++) {
			buf = bufs[i];

			pktinfo.timestamp = rte_rdtsc();
			pktinfo.pktlen = rte_pktmbuf_pkt_len(buf);

			eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *);

			/* strip vlan_hdr */
			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
				/* struct vlan_hdr *vh = (struct vlan_hdr *) &eth_hdr[1]; */
				/* buf->ol_flags |= PKT_RX_VLAN_PKT; */
				/* buf->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); */
				/* memmove(rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr)), */
				/* 		eth_hdr, 2 * ETHER_ADDR_LEN); */
				/* eth_hdr = rte_pktmbuf_mtod(buf, struct ether_hdr *); */
				eth_hdr = (struct ether_hdr *) rte_pktmbuf_adj(buf, sizeof(struct vlan_hdr));
			}

			if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				/* IPv4 */
				pktinfo.type = PKT_IP_TYPE_IPV4;
				ipv4_hdr = (struct ipv4_hdr *) &eth_hdr[1];

				pktinfo.key.v4.src_ip = rte_be_to_cpu_32(ipv4_hdr->src_addr);
				pktinfo.key.v4.dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
				pktinfo.key.v4.proto = ipv4_hdr->next_proto_id;

				switch (ipv4_hdr->next_proto_id) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv4_hdr[1];
						pktinfo.key.v4.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v4.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v4.src_port = 0;
						pktinfo.key.v4.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v4 */
				ret = update_flow_entry(app.flow_table_v4[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v4_count++;
				else
					app_stat[queueid].miss_updated_tbl_v4_count++;

			} else if (eth_hdr->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
				/* IPv6 */
				pktinfo.type = PKT_IP_TYPE_IPV6;
				ipv6_hdr = (struct ipv6_hdr *) &eth_hdr[1];

				rte_memcpy(pktinfo.key.v6.src_ip, ipv6_hdr->src_addr, 16);
				rte_memcpy(pktinfo.key.v6.dst_ip, ipv6_hdr->dst_addr, 16);
				pktinfo.key.v6.proto = ipv6_hdr->proto;

				switch (ipv6_hdr->proto) {
					case IPPROTO_TCP:
						tcp_hdr = (struct tcp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(tcp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(tcp_hdr->dst_port);
						break;
					case IPPROTO_UDP:
						udp_hdr = (struct udp_hdr *) &ipv6_hdr[1];
						pktinfo.key.v6.src_port = rte_be_to_cpu_16(udp_hdr->src_port);
						pktinfo.key.v6.dst_port = rte_be_to_cpu_16(udp_hdr->dst_port);
						break;
					default:
						pktinfo.key.v6.src_port = 0;
						pktinfo.key.v6.dst_port = 0;
						break;
				}

				rte_pktmbuf_free(buf);

				/* update flow_table_v6 */
				ret = update_flow_entry(app.flow_table_v6[queueid], &pktinfo);
				if (ret == 0)
					app_stat[queueid].updated_tbl_v6_count++;
				else
					app_stat[queueid].miss_updated_tbl_v6_count++;

			} else {
				/* others */
				app_stat[queueid].unknown_pkt_count++;
				rte_pktmbuf_free(buf);
				continue;
			}
		}

		port++;
		if (port >= app.n_ports)
			port = 0;
	}

	RTE_LOG(INFO, FLOWATCHER, "[core %u] packet RX & update flow_table finished\n", lcore_id);
}
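The active VLAN-strip line above is a shortcut: after the adj, ether_type and everything behind it line up with struct ether_hdr again, but the MAC addresses read through the new eth_hdr are stale bytes. That is tolerable here because only ether_type onward is used. A strip that keeps the addresses intact, mirroring the commented-out lines, could look roughly like this:

#include <string.h>
#include <rte_mbuf.h>
#include <rte_ether.h>

static struct ether_hdr *strip_vlan(struct rte_mbuf *m)
{
	struct ether_hdr *eh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	char *p = rte_pktmbuf_adj(m, (uint16_t)sizeof(struct vlan_hdr));

	if (p == NULL)
		return NULL;
	/* slide dst+src MACs right, over the 4-byte 802.1Q tag */
	memmove(p, eh, 2 * ETHER_ADDR_LEN);
	return rte_pktmbuf_mtod(m, struct ether_hdr *);
}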
Example #9
static int
paxos_rx_process(struct rte_mbuf *pkt, struct proposer* proposer)
{
    int ret = 0;
    uint8_t l4_proto = 0;
    uint16_t outer_header_len;
    union tunnel_offload_info info = { .data = 0 };
    struct udp_hdr *udp_hdr;
    struct paxos_hdr *paxos_hdr;
    struct ether_hdr *phdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);

    parse_ethernet(phdr, &info, &l4_proto);

    if (l4_proto != IPPROTO_UDP)
        return -1;

    udp_hdr = (struct udp_hdr *)((char *)phdr +
                                 info.outer_l2_len + info.outer_l3_len);

    /* if UDP dst port is not either PROPOSER or LEARNER port */
    if (!(udp_hdr->dst_port == rte_cpu_to_be_16(PROPOSER_PORT) ||
            udp_hdr->dst_port == rte_cpu_to_be_16(LEARNER_PORT)) &&
            (pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
        return -1;

    paxos_hdr = (struct paxos_hdr *)((char *)udp_hdr + sizeof(struct udp_hdr));

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        //rte_hexdump(stdout, "udp", udp_hdr, sizeof(struct udp_hdr));
        //rte_hexdump(stdout, "paxos", paxos_hdr, sizeof(struct paxos_hdr));
        print_paxos_hdr(paxos_hdr);
    }

    int value_len = rte_be_to_cpu_16(paxos_hdr->value_len);
    struct paxos_value *v = paxos_value_new((char *)paxos_hdr->paxosval, value_len);
    switch(rte_be_to_cpu_16(paxos_hdr->msgtype)) {
    case PAXOS_PROMISE: {
        struct paxos_promise promise = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_promise(proposer, &promise);
        break;
    }
    case PAXOS_ACCEPT: {
        if (first_time) {
            proposer_preexecute(proposer);
            first_time = false;
        }
        struct paxos_accept acpt = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accept(proposer, &acpt);
        break;
    }
    case PAXOS_ACCEPTED: {
        struct paxos_accepted ack = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accepted(proposer, &ack);
        break;
    }
    default:
        break;
    }
    outer_header_len = info.outer_l2_len + info.outer_l3_len
                       + sizeof(struct udp_hdr) + sizeof(struct paxos_hdr);

    rte_pktmbuf_adj(pkt, outer_header_len);

    return ret;

}

static uint16_t
add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
               struct rte_mbuf **pkts, uint16_t nb_pkts,
               uint16_t max_pkts __rte_unused, void *user_param)
{
    struct proposer* proposer = (struct proposer *)user_param;
    unsigned i;
    uint64_t now = rte_rdtsc();

    for (i = 0; i < nb_pkts; i++) {
        pkts[i]->udata64 = now;
        paxos_rx_process(pkts[i], proposer);
    }
    return nb_pkts;
}


static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool, struct proposer* proposer)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    int retval;
    uint16_t q;

    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;

    txconf->txq_flags &= PKT_TX_IPV4;
    txconf->txq_flags &= PKT_TX_UDP_CKSUM;
    if (port >= rte_eth_dev_count())
        return -1;

    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), rxconf, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), txconf);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    rte_eth_promiscuous_enable(port);

    rte_eth_add_rx_callback(port, 0, add_timestamps, proposer);
    rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
    return 0;
}


static void
lcore_main(uint8_t port, struct proposer *p)
{
    proposer_preexecute(p);

    for (;;) {
        // Check if signal is received
        if (force_quit)
            break;
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
        if (unlikely(nb_rx == 0))
            continue;
        uint16_t buf;
        for (buf = 0; buf < nb_rx; buf++)
            rte_pktmbuf_free(bufs[buf]);
    }
}



static __attribute__((noreturn)) int
lcore_mainloop(__attribute__((unused)) void *arg)
{
    uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
    unsigned lcore_id;

    lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_TIMER,
            "Starting mainloop on core %u\n", lcore_id);

    while(1) {
        cur_tsc = rte_rdtsc();
        diff_tsc = cur_tsc - prev_tsc;
        if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();
            prev_tsc = cur_tsc;
        }
    }
}

static void
report_stat(struct rte_timer *tim, __attribute__((unused)) void *arg)
{
    /* print stat */
    uint32_t count = rte_atomic32_read(&stat);
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER8,
            "Throughput = %8u msg/s\n", count);
    /* reset stat */
    rte_atomic32_set(&stat, 0);
    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}


static void
check_timeout(struct rte_timer *tim, void *arg)
{
    struct proposer* p = (struct proposer *) arg;
    unsigned lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "%s() on lcore_id %i\n", __func__, lcore_id);

    struct paxos_message out;
    out.type = PAXOS_PREPARE;
    struct timeout_iterator* iter = proposer_timeout_iterator(p);
    while(timeout_iterator_prepare(iter, &out.u.prepare)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s Send PREPARE inst %d ballot %d\n",
                __func__, out.u.prepare.iid, out.u.prepare.ballot);
        send_paxos_message(&out);
    }
    out.type = PAXOS_ACCEPT;
    while(timeout_iterator_accept(iter, &out.u.accept)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s: Send ACCEPT inst %d ballot %d\n",
                __func__, out.u.accept.iid, out.u.accept.ballot);
        send_paxos_message(&out);
    }
    timeout_iterator_free(iter);

    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}

int
main(int argc, char *argv[])
{
    uint8_t portid = 0;
    unsigned master_core, lcore_id;
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);
    force_quit = false;
    int proposer_id = 0;

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        paxos_config.verbosity = PAXOS_LOG_DEBUG;
    }

    struct proposer *proposer = proposer_new(proposer_id, NUM_ACCEPTORS);
    first_time = true;
    /* init EAL */
    int ret = rte_eal_init(argc, argv);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* init timer structure */
    rte_timer_init(&timer);
    rte_timer_init(&stat_timer);

    /* load the timeout-check timer, every 1 s, on a slave lcore, reloaded automatically */
    uint64_t hz = rte_get_timer_hz();

    /* Call rte_timer_manage every 10ms */
    TIMER_RESOLUTION_CYCLES = hz / 100;
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "Clock: %"PRIu64"\n", hz);

    /* master core */
    master_core = rte_lcore_id();
    /* slave core */
    lcore_id = rte_get_next_lcore(master_core, 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&timer, hz, PERIODICAL, lcore_id, check_timeout, proposer);
    /* launch the timer-management loop on the slave core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    /* stat core */
    lcore_id = rte_get_next_lcore(lcore_id , 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&stat_timer, hz, PERIODICAL, lcore_id,
                    report_stat, NULL);

    /* init RTE timer library */
    rte_timer_subsystem_init();

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
                                        NUM_MBUFS, MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf_pool\n");
    /* launch the timer-management loop on the stat core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    if (port_init(portid, mbuf_pool, proposer) != 0)
        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);


    lcore_main(portid, proposer);

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Free proposer\n");
    proposer_free(proposer);
    return 0;
}
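paxos_rx_process() above ends by removing the entire encapsulation (Ethernet, IP, UDP and Paxos headers) with a single adj, using lengths gathered during parsing rather than compile-time constants. A small sketch of that pattern with illustrative parameters:

#include <rte_mbuf.h>

/* Sketch: strip a parsed, variable-length encapsulation in one call. */
static int strip_parsed_headers(struct rte_mbuf *pkt, uint16_t l2_len,
				uint16_t l3_len, uint16_t l4_len,
				uint16_t app_hdr_len)
{
	uint16_t total = (uint16_t)(l2_len + l3_len + l4_len + app_hdr_len);

	return rte_pktmbuf_adj(pkt, total) != NULL ? 0 : -1;
}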
Example #10
static void remove_vtep_hdr(struct pg_bench *bench)
{
	for (int i = 0; i < 64; ++i) {
		rte_pktmbuf_adj(bench->pkts[i], HEADERS_LENGTH);
	}
}
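This batch strip ignores the return value of rte_pktmbuf_adj(), silently assuming every first segment holds at least HEADERS_LENGTH bytes. A defensive variant (batch size and header length as parameters, names mine):

#include <rte_mbuf.h>

static int strip_encap_checked(struct rte_mbuf **pkts, int n, uint16_t hdr_len)
{
	for (int i = 0; i < n; ++i) {
		if (rte_pktmbuf_adj(pkts[i], hdr_len) == NULL)
			return -1;	/* packet shorter than the encap header */
	}
	return 0;
}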