Exemplo n.º 1
0
/**
 * Run the service identified by 'id' once on the calling application lcore.
 *
 * @param id                  Index of the service in rte_services[].
 * @param serialize_mt_unsafe Non-zero to serialize MT-unsafe services: this
 *                            core is counted in num_mapped_cores for the
 *                            duration of the call so that concurrent callers
 *                            are rejected instead of racing.
 * @return 0 on success, -EINVAL if 'id' is not a valid service, -EBUSY if
 *         the service is MT-unsafe and another core is currently mapped,
 *         otherwise the result of service_run().
 */
int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
		uint32_t serialize_mt_unsafe)
{
	/* run service on calling core, using all-ones as the service mask */
	if (!service_valid(id))
		return -EINVAL;

	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s = &rte_services[id];

	/* Atomically add this core to the mapped cores first, then examine if
	 * we can run the service. This avoids a race condition between
	 * checking the value, and atomically adding to the mapped count.
	 */
	if (serialize_mt_unsafe)
		rte_atomic32_inc(&s->num_mapped_cores);

	/* Refuse to run an MT-unsafe service while another core is mapped.
	 * NOTE(review): when serialize_mt_unsafe == 0 the increment above is
	 * skipped, so this check is advisory only — the caller then owns
	 * mutual exclusion.
	 */
	if (service_mt_safe(s) == 0 &&
			rte_atomic32_read(&s->num_mapped_cores) > 1) {
		if (serialize_mt_unsafe)
			rte_atomic32_dec(&s->num_mapped_cores);
		return -EBUSY;
	}

	int ret = service_run(id, cs, UINT64_MAX);

	/* Drop the temporary mapping taken above. */
	if (serialize_mt_unsafe)
		rte_atomic32_dec(&s->num_mapped_cores);

	return ret;
}
Exemplo n.º 2
0
/*
 * Execute service 'i' for core state 'cs' if it is currently runnable.
 *
 * Returns -EINVAL for an invalid service index, -ENOEXEC when the service
 * is not RUNNING (component and application state) or its bit is not set
 * in 'service_mask', -EBUSY when an MT-unsafe service is already executing
 * on another core, and 0 after the callback has been invoked.
 */
static inline int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
{
	if (!service_valid(i))
		return -EINVAL;
	struct rte_service_spec_impl *s = &rte_services[i];
	if (s->comp_runstate != RUNSTATE_RUNNING ||
			s->app_runstate != RUNSTATE_RUNNING ||
			!(service_mask & (UINT64_C(1) << i)))
		return -ENOEXEC;

	/* check do we need cmpset, if MT safe or <= 1 core
	 * mapped, atomic ops are not required.
	 */
	const int use_atomics = (service_mt_safe(s) == 0) &&
				(rte_atomic32_read(&s->num_mapped_cores) > 1);
	if (use_atomics) {
		/* Take the per-service execute lock; fail fast with -EBUSY
		 * if another core already holds it.
		 */
		if (!rte_atomic32_cmpset((uint32_t *)&s->execute_lock, 0, 1))
			return -EBUSY;

		rte_service_runner_do_callback(s, cs, i);
		rte_atomic32_clear(&s->execute_lock);
	} else
		rte_service_runner_do_callback(s, cs, i);

	return 0;
}
Exemplo n.º 3
0
/*
 * Dataplane supervisor thread: waits for the application to reach the
 * STARTED state, idles until a stop is requested via the dpdk_stop flag,
 * then joins every worker lcore before returning.
 */
lagopus_result_t
dataplane_thread_loop(__UNUSED const lagopus_thread_t *selfptr,
                      __UNUSED void *arg) {
  global_state_t state;
  shutdown_grace_level_t grace;
  unsigned lcore;

  /* Block until the whole application has started (no timeout). */
  lagopus_result_t st = global_state_wait_for(GLOBAL_STATE_STARTED,
                                              &state,
                                              &grace,
                                              -1);
  if (st != LAGOPUS_RESULT_OK) {
    return st;
  }

  /* Sleep-poll the stop flag once per second. */
  while (rte_atomic32_read(&dpdk_stop) == 0) {
    sleep(1);
  }

  /* 'stop' is requested: reap every worker lcore. */
  RTE_LCORE_FOREACH_SLAVE(lcore) {
    if (rte_eal_wait_lcore(lcore) < 0) {
      return LAGOPUS_RESULT_STOP;
    }
  }
  return LAGOPUS_RESULT_OK;
}
Exemplo n.º 4
0
/*
 * Stop the service runner on an lcore.
 *
 * Fails with -EINVAL for an out-of-range lcore id, -EALREADY if the lcore
 * is already stopped, and -EBUSY if this lcore is the only one mapped to a
 * currently-running service (stopping it would stall that service).
 */
int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	if (lcore_states[lcore].runstate == RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		/* BUGFIX: assigning the raw 64-bit mask test to an int32_t
		 * truncated bits 32..63 to zero, so services with an index
		 * >= 32 could never veto the stop. Normalize to 0/1 instead.
		 */
		int32_t enabled = !!(service_mask & (UINT64_C(1) << i));
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			rte_atomic32_read(&rte_services[i].num_mapped_cores));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	lcore_states[lcore].runstate = RUNSTATE_STOPPED;

	return 0;
}
Exemplo n.º 5
0
/*
 * Return the usable TX space for 'sock': the smaller of the ring space and
 * the peer-advertised tx_space expressed in 1448-byte segments. When no
 * space is left, clears the write-ready flag and requests a notification
 * once TX buffers drain.
 */
int ipaugenblick_get_socket_tx_space_own_buffer(int sock)
{
    int idx = sock & SOCKET_READY_MASK;
    int ring = ipaugenblick_socket_tx_space(sock);
    int bufs = rte_atomic32_read(&(local_socket_descriptors[idx].socket->tx_space)) / 1448;
    int space = (bufs < ring) ? bufs : ring;

    if (space == 0) {
        /* Nothing writable: mark not-ready and ask to be told when empty. */
        rte_atomic16_set(&(local_socket_descriptors[idx].socket->write_ready_to_app), 0);
        ipaugenblick_notify_empty_tx_buffers(sock);
    }
    return space;
}
Exemplo n.º 6
0
/*
 * Report whether service 'id' is runnable: both the application and the
 * component runstate must be RUNNING, and — unless the service opted out
 * via SERVICE_F_START_CHECK — at least one lcore must be mapped to it.
 *
 * Returns 1 if runnable, 0 if not, -EINVAL for an invalid id.
 */
int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	rte_smp_rmb();

	const int start_check_off = !(s->internal_flags & SERVICE_F_START_CHECK);
	const int has_lcore = (rte_atomic32_read(&s->num_mapped_cores) > 0);

	if (s->app_runstate != RUNSTATE_RUNNING)
		return 0;
	if (s->comp_runstate != RUNSTATE_RUNNING)
		return 0;
	return start_check_off | has_lcore;
}
Exemplo n.º 7
0
/*
 * Enable or disable the TUN/TAP interface for a port.
 *
 * When 'onOff' is ENABLE_STATE a tap device named "pgtap<pid>" is created,
 * brought UP/RUNNING via a throwaway control socket, and PROCESS_TAP_PKTS
 * is set on the port. Otherwise the tap fd is closed (if tap processing
 * was enabled) and the flag is cleared. Errors are reported to stdout and
 * leave info->tapfd reset to 0.
 */
void
pktgen_set_tap(port_info_t * info, uint32_t onOff)
{
	if ( onOff == ENABLE_STATE ) {
		struct ifreq	ifr;
		int sockfd, i;
		static char * tapdevs[] = { "/dev/net/tun", "/dev/tun", NULL };

		/* Try each known clone-device path until one opens. */
		for(i = 0; tapdevs[i]; i++) {
			if ( (info->tapfd = open(tapdevs[i], O_RDWR)) >= 0 ) {
				break;
			}
		}
		if ( tapdevs[i] == NULL ) {
			printf("Unable to create TUN/TAP interface.\n");
			return;
		}
		memset(&ifr, 0, sizeof(struct ifreq));

		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;

		snprintf(ifr.ifr_name, IFNAMSIZ, "%s%d", "pgtap", info->pid);
		if ( ioctl(info->tapfd, TUNSETIFF, (void *)&ifr) < 0 ) {
			printf("Unable to set TUNSETIFF for %s\n", ifr.ifr_name);
			close(info->tapfd);
			info->tapfd = 0;
			return;
		}

		/* Control socket used only to flip the interface UP/RUNNING. */
		sockfd = socket(AF_INET, SOCK_DGRAM, 0);
		if ( sockfd < 0 ) {
			/* BUGFIX: the original passed an unchecked -1 to ioctl(). */
			printf("Unable to open control socket for %s\n", ifr.ifr_name);
			close(info->tapfd);
			info->tapfd = 0;
			return;
		}

		ifr.ifr_flags = IFF_UP | IFF_RUNNING;
		if ( ioctl(sockfd, SIOCSIFFLAGS, (void *)&ifr) < 0 ) {
			printf("Unable to set SIOCSIFFLAGS for %s\n", ifr.ifr_name);
			close(sockfd);
			close(info->tapfd);
			info->tapfd = 0;
			return;
		}
		close(sockfd);
		pktgen_set_port_flags(info, PROCESS_TAP_PKTS);
	} else {
		/* Only close the fd if tap processing was actually enabled. */
		if ( rte_atomic32_read(&info->port_flags) & PROCESS_TAP_PKTS ) {
			close(info->tapfd);
			info->tapfd = 0;
		}
		pktgen_clr_port_flags(info, PROCESS_TAP_PKTS);
	}
}
Exemplo n.º 8
0
/*
 * Return the on-the-wire size in bytes of the packets a port sends:
 * payload plus preamble, inter-frame gap and FCS. For sequence mode this
 * is the average over all configured sequence packets.
 */
static __inline__ uint64_t
pktgen_wire_size(port_info_t *info) {
	const uint64_t overhead = PKT_PREAMBLE_SIZE + INTER_FRAME_GAP + FCS_SIZE;
	uint64_t bytes;

	if (rte_atomic32_read(&info->port_flags) & SEND_PCAP_PKTS) {
		bytes = info->pcap->pkt_size + overhead;
	} else if (unlikely(info->seqCnt > 0)) {
		uint64_t j, total = 0;

		for (j = 0; j < info->seqCnt; j++)
			total += info->seq_pkt[j].pktSize + overhead;
		bytes = total / info->seqCnt;	/* average sized packet */
	} else {
		bytes = info->seq_pkt[SINGLE_PKT].pktSize + overhead;
	}
	return bytes;
}
Exemplo n.º 9
0
/*
 * Render the port's flag bits as a fixed ten-character status string
 * (one letter per enabled feature, '-' otherwise). Returns a pointer to
 * a static buffer, so the result is overwritten by the next call and is
 * not thread-safe.
 */
char *
pktgen_flags_string( port_info_t * info )
{
    static char buff[32];
    const uint32_t flags = rte_atomic32_read(&info->port_flags);
    char *p = buff;

    *p++ = (pktgen.flags & PROMISCUOUS_ON_FLAG) ? 'P' : '-';
    *p++ = (flags & ICMP_ECHO_ENABLE_FLAG) ? 'E' : '-';
    *p++ = (flags & SEND_ARP_REQUEST) ? 'A' : '-';
    *p++ = (flags & SEND_GRATUITOUS_ARP) ? 'G' : '-';
    *p++ = (flags & SEND_PCAP_PKTS) ? 'p' : '-';
    *p++ = (flags & SEND_SEQ_PKTS) ? 'S' : '-';
    *p++ = (flags & SEND_RANGE_PKTS) ? 'R' : '-';
    *p++ = (flags & PROCESS_INPUT_PKTS) ? 'I' : '-';
    *p++ = (flags & PROCESS_TAP_PKTS) ? 'T' : '-';
    *p++ = (flags & SEND_VLAN_ID) ? 'V' : '-';
    *p = '\0';

    return buff;
}
Exemplo n.º 10
0
int
kni_main_loop(void* arg)
{
	uint8_t i, nb_ports = rte_eth_dev_count();
	int32_t f_stop;
	const unsigned lcore_id = (uintptr_t)arg;

	RTE_LOG(INFO, KNI, "entering kni main loop on lcore %u\n", lcore_id);

	while (1) {
		f_stop = rte_atomic32_read(&kni_stop);
		if (f_stop)
			break;
		for (i = 0; i < nb_ports; i++) {
			kni_egress(kni_port_params_array[i], lcore_id);
		}
		usleep(1000);
	}

	return 0;
}
Exemplo n.º 11
0
/*
 * Populate the GUI's packet_info array with protocol field name/value
 * pairs for the given port/sequence slot, and refresh related widgets.
 *
 * 'type' selects which layer is described: TYPE_ETH (MACs, ethertype,
 * VLAN, total size, IP protocol), TYPE_IPv4 (mostly "<auto>" fields plus
 * src/dst addresses), or TYPE_UDP (ports plus "<auto>" length/checksum).
 */
void
fill_proto_field_info(proto_type type, unsigned int pid, unsigned int seq_id)
{
	uint i = 0;
	protocol val;
	unsigned int size;

	port_info_t *info = NULL;
	pkt_seq_t *pkt = NULL;

	info = &pktgen.info[pid];
	pkt  = &info->seq_pkt[seq_id];
	char buff[50];

	g_return_if_fail(packet_info != NULL);
	/* NOTE(review): pkt holds the address of an array element and can
	 * never be NULL, so this fallback is dead code — presumably the
	 * intent was to range-check seq_id; confirm against callers.
	 */
	if (pkt == NULL)/* Update with default values */
		pkt  = &info->seq_pkt[SINGLE_PKT];

	if (type == TYPE_ETH) {
		/* Destination MAC. */
		struct ether_addr *eaddr = &pkt->eth_dst_addr;
		val.name = g_strdup(pktgen_ethernet_fields[i++]);
		snprintf(buff, sizeof(buff), "%02x%02x%02x%02x%02x%02x",
		         eaddr->addr_bytes[0], eaddr->addr_bytes[1],
		         eaddr->addr_bytes[2], eaddr->addr_bytes[3],
		         eaddr->addr_bytes[4], eaddr->addr_bytes[5]);

		val.value = g_strdup(buff);
		g_array_append_vals(packet_info, &val, 1);

		/* Source MAC. */
		val.name = g_strdup(pktgen_ethernet_fields[i++]);
		eaddr = &pkt->eth_src_addr;
		snprintf(buff, sizeof(buff), "%02x%02x%02x%02x%02x%02x",
		         eaddr->addr_bytes[0], eaddr->addr_bytes[1],
		         eaddr->addr_bytes[2], eaddr->addr_bytes[3],
		         eaddr->addr_bytes[4], eaddr->addr_bytes[5]);

		val.value = g_strdup(buff);
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ethernet_fields[i++]);
		val.value = g_strdup("IPv4");
		g_array_append_vals(packet_info, &val, 1);

		/* Reflect the port's VLAN flag in the L2 VLAN toggle. */
		if (rte_atomic32_read(&info->port_flags) & SEND_VLAN_ID)
			g_object_set(G_OBJECT(stream_l2_vlan), "active", TRUE,
			             NULL);
		else
			g_object_set(G_OBJECT(stream_l2_vlan), "active", FALSE,
			             NULL);

		sprintf(buff, "%d", pkt->vlanid);
		val.name = g_strdup(pktgen_ethernet_fields[i++]);
		val.value = g_strdup(buff);
		g_array_append_vals(packet_info, &val, 1);

		/* Display packet size including the 4-byte FCS. */
		size = (pkt->pktSize + FCS_SIZE);
		sprintf(buff, "%d", size);
		gtk_entry_set_text(GTK_ENTRY(pktsize_entry), buff);

		sprintf(buff, "%x", pkt->ipProto);
		gtk_entry_set_text(GTK_ENTRY(ip_proto_entry), buff);
		gtk_entry_set_editable(GTK_ENTRY(ip_proto_entry), FALSE);
	} else if (type == TYPE_IPv4) {
		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("IPv4");
		g_array_append_vals(packet_info, &val, 1);

		/* Header fields computed at transmit time are shown as <auto>. */
		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup(
		                (pkt->ipProto ==
		                 PG_IPPROTO_UDP) ? "UDP" : "User Defined");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		/* Source and destination IPv4 addresses, dotted-quad form. */
		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value =
		        g_strdup(inet_ntop4(buff, sizeof(buff),
		                            ntohl(pkt->ip_src_addr.addr.ipv4.
		                                  s_addr),
		                            0xFFFFFFFF));
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_ipv4_fields[i++]);
		val.value =
		        g_strdup(inet_ntop4(buff, sizeof(buff),
		                            ntohl(pkt->ip_dst_addr.addr.ipv4.
		                                  s_addr),
		                            0xFFFFFFFF));
		g_array_append_vals(packet_info, &val, 1);
	} else if (type == TYPE_UDP) {
		sprintf(buff, "%d", pkt->sport);
		val.name = g_strdup(pktgen_udp_fields[i++]);
		val.value = g_strdup(buff);
		g_array_append_vals(packet_info, &val, 1);

		sprintf(buff, "%d", pkt->dport);
		val.name = g_strdup(pktgen_udp_fields[i++]);
		val.value = g_strdup(buff);
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_udp_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);

		val.name = g_strdup(pktgen_udp_fields[i++]);
		val.value = g_strdup("<auto>");
		g_array_append_vals(packet_info, &val, 1);
	}
}
Exemplo n.º 12
0
/*
 * Construct the L2 (Ethernet) header for a packet, optionally inserting a
 * VLAN tag, a single MPLS label, or a Q-in-Q tag pair depending on the
 * port flags (checked in that priority order). Updates pkt->ether_hdr_size
 * and returns a pointer to the first byte after the constructed header,
 * where the L3 payload begins.
 */
char *
pktgen_ether_hdr_ctor(port_info_t *info, pkt_seq_t *pkt, struct ether_hdr *eth)
{
	uint32_t flags;

	/* src and dest addr */
	ether_addr_copy(&pkt->eth_src_addr, &eth->s_addr);
	ether_addr_copy(&pkt->eth_dst_addr, &eth->d_addr);

	/* BUGFIX: read the port flags once. The original re-read the atomic
	 * in the MPLS and Q-in-Q branches, so a concurrent flag change could
	 * be observed mid-decision and produce an inconsistent header.
	 */
	flags = rte_atomic32_read(&info->port_flags);
	if (flags & SEND_VLAN_ID) {
		/* vlan ethernet header */
		eth->ether_type = htons(ETHER_TYPE_VLAN);

		/* only set the TCI field for now; don't bother with PCP/DEI */
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth + 1);
		vlan_hdr->vlan_tci = htons(pkt->vlanid);
		vlan_hdr->eth_proto = htons(pkt->ethType);

		/* adjust header size for VLAN tag */
		pkt->ether_hdr_size = sizeof(struct ether_hdr) +
		        sizeof(struct vlan_hdr);

		return (char *)(vlan_hdr + 1);
	} else if (flags & SEND_MPLS_LABEL) {
		/* MPLS unicast ethernet header */
		eth->ether_type = htons(ETHER_TYPE_MPLS_UNICAST);

		mplsHdr_t *mpls_hdr = (mplsHdr_t *)(eth + 1);

		/* Only a single MPLS label is supported at the moment. Make sure the
		 * BoS flag is set. */
		uint32_t mpls_label = pkt->mpls_entry;
		MPLS_SET_BOS(mpls_label);

		mpls_hdr->label = htonl(mpls_label);

		/* Adjust header size for MPLS label */
		pkt->ether_hdr_size = sizeof(struct ether_hdr) +
		        sizeof(mplsHdr_t);

		return (char *)(mpls_hdr + 1);
	} else if (flags & SEND_Q_IN_Q_IDS) {
		/* Q-in-Q ethernet header */
		eth->ether_type = htons(ETHER_TYPE_Q_IN_Q);

		qinqHdr_t *qinq_hdr = (qinqHdr_t *)(eth + 1);

		/* only set the TCI field for now; don't bother with PCP/DEI */
		qinq_hdr->qinq_tci = htons(pkt->qinq_outerid);

		qinq_hdr->vlan_tpid = htons(ETHER_TYPE_VLAN);
		qinq_hdr->vlan_tci = htons(pkt->qinq_innerid);

		qinq_hdr->eth_proto = htons(pkt->ethType);

		/* Adjust header size for Q-in-Q header */
		pkt->ether_hdr_size = sizeof(struct ether_hdr) +
		        sizeof(qinqHdr_t);

		return (char *)(qinq_hdr + 1);
	} else {
		/* normal ethernet header */
		eth->ether_type = htons(pkt->ethType);
		pkt->ether_hdr_size = sizeof(struct ether_hdr);
	}

	return (char *)(eth + 1);
}
Exemplo n.º 13
0
/*
 * Save the current pktgen configuration to 'path' as a runnable command
 * script: a commented header describing the run, then per-port primary
 * settings, flag values, range parameters, sequence packets and PCAP info.
 *
 * Returns 0 on success, -1 if the file could not be opened for writing.
 * NOTE(review): the fclose() return value is not checked, so a failed
 * flush on a full disk goes unreported.
 */
int
pktgen_save(char * path)
{
	port_info_t	  * info;
	pkt_seq_t	  * pkt;
	range_info_t  * range;
	uint32_t		flags;
	char		buff[64];
	FILE	  * fd;
	int			i, j;
	uint32_t	lcore;
    struct ether_addr eaddr;

	fd = fopen(path, "w");
	if ( fd == NULL ) {
		return -1;
	}

	/* Build a bitmask of enabled lcores for the command-line echo.
	 * NOTE(review): lcore is 32 bits; '1 << i' for i >= 32 is undefined
	 * behavior if RTE_MAX_LCORE exceeds 32 — confirm the build config.
	 */
	for(i=0, lcore=0; i<RTE_MAX_LCORE; i++)
		if ( rte_lcore_is_enabled(i) )
			lcore |= (1 << i);

	fprintf(fd, "#\n# Pktgen - %s\n", pktgen_version());
	fprintf(fd, "# %s, %s\n\n", wr_copyright_msg(), wr_powered_by());

	// TODO: Determine DPDK arguments for rank and memory, default for now.
	fprintf(fd, "# Command line arguments: (DPDK args are defaults)\n");
	fprintf(fd, "# %s -c %x -n 3 -m 512 --proc-type %s -- ", pktgen.argv[0], lcore, (rte_eal_process_type() == RTE_PROC_PRIMARY)? "primary" : "secondary");
	for(i=1; i < pktgen.argc; i++)
		fprintf(fd, "%s ", pktgen.argv[i]);
	fprintf(fd, "\n\n");

	fprintf(fd, "#######################################################################\n");
	fprintf(fd, "# Pktgen Configuration script information:\n");
	fprintf(fd, "#   GUI socket is %s\n", (pktgen.flags & ENABLE_GUI_FLAG)? "Enabled" : "Not Enabled");
	fprintf(fd, "#   Enabled Port mask: %08x\n", pktgen.enabled_port_mask);
	fprintf(fd, "#   Flags %08x\n", pktgen.flags);
	fprintf(fd, "#   Number of ports: %d\n", pktgen.nb_ports);
	fprintf(fd, "#   Number ports per page: %d\n", pktgen.nb_ports_per_page);
	fprintf(fd, "#   Coremask 0x%08x\n", pktgen.coremask);
	fprintf(fd, "#   Number descriptors: RX %d TX: %d\n", pktgen.nb_rxd, pktgen.nb_txd);
	fprintf(fd, "#   Promiscuous mode is %s\n\n", (pktgen.flags & PROMISCUOUS_ON_FLAG)? "Enabled" : "Disabled");

	/* List every known port; '--' marks blacklisted, '++' enabled. */
	fprintf(fd, "# Port Descriptions (-- = blacklisted port):\n");
	for(i=0; i < RTE_MAX_ETHPORTS; i++) {
		if ( strlen(pktgen.portdesc[i]) ) {
	    	if ( (pktgen.enabled_port_mask & (1 << i)) == 0 )
	    		strcpy(buff, "--");
	    	else
	    		strcpy(buff, "++");

			fprintf(fd, "#   %s %s\n", buff, pktgen.portdesc[i]);
		}
	}
	fprintf(fd, "\n#######################################################################\n");

	fprintf(fd, "# Global configuration:\n");
	fprintf(fd, "geometry %dx%d\n", pktgen.scrn->ncols, pktgen.scrn->nrows);
	fprintf(fd, "mac_from_arp %s\n\n", (pktgen.flags & MAC_FROM_ARP_FLAG)? "enable" : "disable");

	/* Emit the full configuration for every active port. */
	for(i=0; i < RTE_MAX_ETHPORTS; i++) {
		info = &pktgen.info[i];
		pkt = &info->seq_pkt[SINGLE_PKT];
		range = &info->range;

		/* tx_burst == 0 means the port was never configured — skip it. */
		if ( info->tx_burst == 0 )
			continue;

		fprintf(fd, "######################### Port %2d ##################################\n", i);
		if ( info->transmit_count == 0 )
			strcpy(buff, "Forever");
		else
			snprintf(buff, sizeof(buff), "%ld", info->transmit_count);
		fprintf(fd, "#\n");
		flags = rte_atomic32_read(&info->port_flags);
		fprintf(fd, "# Port: %2d, Burst:%3d, Rate:%3d%%, Flags:%08x, TX Count:%s\n",
				info->pid, info->tx_burst, info->tx_rate, flags, buff);
		fprintf(fd, "#           SeqCnt:%d, Prime:%d VLAN ID:%04x, ",
				info->seqCnt, info->prime_cnt, info->vlanid);
		pktgen_link_state(info->pid, buff, sizeof(buff));
		fprintf(fd, "Link: %s\n", buff);

		fprintf(fd, "#\n# Set up the primary port information:\n");
		fprintf(fd, "set %d count %ld\n", info->pid, info->transmit_count);
		fprintf(fd, "set %d size %d\n", info->pid, pkt->pktSize+FCS_SIZE);
		fprintf(fd, "set %d rate %d\n", info->pid, info->tx_rate);
		fprintf(fd, "set %d burst %d\n", info->pid, info->tx_burst);
		fprintf(fd, "set %d sport %d\n", info->pid, pkt->sport);
		fprintf(fd, "set %d dport %d\n", info->pid, pkt->dport);
		fprintf(fd, "set %d prime %d\n", info->pid, info->prime_cnt);
		fprintf(fd, "set %s %d\n",
				(pkt->ethType == ETHER_TYPE_IPv4)? "ipv4" :
				(pkt->ethType == ETHER_TYPE_IPv6)? "ipv6" :
				(pkt->ethType == ETHER_TYPE_VLAN)? "vlan" : "unknown", i);
		fprintf(fd, "set %s %d\n",
				(pkt->ipProto == PG_IPPROTO_TCP)? "tcp" :
				(pkt->ipProto == PG_IPPROTO_ICMP)? "icmp" : "udp", i);
		fprintf(fd, "set ip dst %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(pkt->ip_dst_addr), 0xFFFFFFFF));
		fprintf(fd, "set ip src %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(pkt->ip_src_addr), pkt->ip_mask));
		fprintf(fd, "set mac %d %s\n", info->pid, inet_mtoa(buff, sizeof(buff), &pkt->eth_dst_addr));
		fprintf(fd, "vlanid %d %d\n\n", i, pkt->vlanid);

		fprintf(fd, "#\n# Port flag values:\n");
		fprintf(fd, "icmp.echo %d %sable\n", i, (flags & ICMP_ECHO_ENABLE_FLAG)? "en" : "dis");
		fprintf(fd, "pcap %d %sable\n", i, (flags & SEND_PCAP_PKTS)? "en" : "dis");
		fprintf(fd, "range %d %sable\n", i, (flags & SEND_RANGE_PKTS)? "en" : "dis");
		fprintf(fd, "process %d %sable\n", i, (flags & PROCESS_INPUT_PKTS)? "en" : "dis");
		fprintf(fd, "tap %d %sable\n", i, (flags & PROCESS_TAP_PKTS)? "en" : "dis");
		fprintf(fd, "vlan %d %sable\n\n", i, (flags & SEND_VLAN_ID)? "en" : "dis");

		fprintf(fd, "#\n# Range packet information:\n");
		fprintf(fd, "src.mac start %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac, &eaddr)));
		fprintf(fd, "src.mac min %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_min, &eaddr)));
		fprintf(fd, "src.mac max %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_max, &eaddr)));
        fprintf(fd, "src.mac inc %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->src_mac_inc, &eaddr)));

		fprintf(fd, "dst.mac start %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac, &eaddr)));
		fprintf(fd, "dst.mac min %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_min, &eaddr)));
		fprintf(fd, "dst.mac max %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_max, &eaddr)));
		fprintf(fd, "dst.mac inc %d %s\n", i, inet_mtoa(buff, sizeof(buff), inet_h64tom(range->dst_mac_inc, &eaddr)));

		fprintf(fd, "\n");
		fprintf(fd, "src.ip start %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip), 0xFFFFFFFF));
		fprintf(fd, "src.ip min %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_min), 0xFFFFFFFF));
		fprintf(fd, "src.ip max %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_max), 0xFFFFFFFF));
		fprintf(fd, "src.ip inc %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->src_ip_inc), 0xFFFFFFFF));

		fprintf(fd, "\n");
		fprintf(fd, "dst.ip start %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip), 0xFFFFFFFF));
		fprintf(fd, "dst.ip min %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_min), 0xFFFFFFFF));
		fprintf(fd, "dst.ip max %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_max), 0xFFFFFFFF));
		fprintf(fd, "dst.ip inc %d %s\n", i, inet_ntop4(buff, sizeof(buff), ntohl(range->dst_ip_inc), 0xFFFFFFFF));

		fprintf(fd, "\n");
		fprintf(fd, "src.port start %d %d\n", i, range->src_port);
		fprintf(fd, "src.port min %d %d\n", i, range->src_port_min);
		fprintf(fd, "src.port max %d %d\n", i, range->src_port_max);
		fprintf(fd, "src.port inc %d %d\n", i, range->src_port_inc);

		fprintf(fd, "\n");
		fprintf(fd, "dst.port start %d %d\n", i, range->dst_port);
		fprintf(fd, "dst.port min %d %d\n", i, range->dst_port_min);
		fprintf(fd, "dst.port max %d %d\n", i, range->dst_port_max);
		fprintf(fd, "dst.port inc %d %d\n", i, range->dst_port_inc);

		fprintf(fd, "\n");
		fprintf(fd, "vlan.id start %d %d\n", i, range->vlan_id);
		fprintf(fd, "vlan.id min %d %d\n", i, range->vlan_id_min);
		fprintf(fd, "vlan.id max %d %d\n", i, range->vlan_id_max);
		fprintf(fd, "vlan.id inc %d %d\n", i, range->vlan_id_inc);

		fprintf(fd, "\n");
		/* Saved sizes include the FCS so a re-load reproduces them. */
		fprintf(fd, "pkt.size start %d %d\n", i, range->pkt_size + FCS_SIZE);
		fprintf(fd, "pkt.size min %d %d\n", i, range->pkt_size_min + FCS_SIZE);
		fprintf(fd, "pkt.size max %d %d\n", i, range->pkt_size_max + FCS_SIZE);
		fprintf(fd, "pkt.size inc %d %d\n\n", i, range->pkt_size_inc);

		fprintf(fd, "#\n# Set up the sequence data for the port.\n");
		fprintf(fd, "set %d seqCnt %d\n", info->pid, info->seqCnt);
		for(j=0; j<info->seqCnt; j++) {
			pkt = &info->seq_pkt[j];
			fprintf(fd, "seq %d %d %s ", j, i, inet_mtoa(buff, sizeof(buff), &pkt->eth_dst_addr));
			fprintf(fd, "%s ", inet_mtoa(buff, sizeof(buff), &pkt->eth_src_addr));
			fprintf(fd, "%s ", inet_ntop4(buff, sizeof(buff), htonl(pkt->ip_dst_addr), 0xFFFFFFFF));
			fprintf(fd, "%s ", inet_ntop4(buff, sizeof(buff), htonl(pkt->ip_src_addr), pkt->ip_mask));
			fprintf(fd, "%d %d %s %s %d %d\n",
					pkt->sport,
					pkt->dport,
					(pkt->ethType == ETHER_TYPE_IPv4)? "ipv4" :
							(pkt->ethType == ETHER_TYPE_IPv6)? "ipv6" :
							(pkt->ethType == ETHER_TYPE_VLAN)? "vlan" : "Other",
					(pkt->ipProto == PG_IPPROTO_TCP)? "tcp" :
							(pkt->ipProto == PG_IPPROTO_ICMP)? "icmp" : "udp",
					pkt->vlanid,
					pkt->pktSize+FCS_SIZE);
		}

		if ( pktgen.info[i].pcap ) {
			fprintf(fd, "#\n# PCAP port %d\n", i);
			fprintf(fd, "#    Packet count: %d\n", pktgen.info[i].pcap->pkt_count);
			fprintf(fd, "#    Filename    : %s\n", pktgen.info[i].pcap->filename);
		}
		fprintf(fd, "\n");
	}
	fprintf(fd, "################################ Done #################################\n");

	fclose(fd);
	return 0;
}
Exemplo n.º 14
0
/*
 * Process a received IPv4 ICMP packet. When ICMP echo handling is enabled
 * for the port, echo requests addressed to one of the port's IPs are
 * answered by rewriting the mbuf in place (type, checksums, swapped
 * addresses) and retransmitting it; echo replies are only counted.
 * The mbuf is reused for the reply, so it must not be freed here.
 */
void
pktgen_process_ping4( struct rte_mbuf * m, uint32_t pid, uint32_t vlan )
{
    port_info_t   * info = &pktgen.info[pid];
    pkt_seq_t     * pkt;
    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    ipHdr_t       * ip = (ipHdr_t *)&eth[1];
    char            buff[24];

	/* Adjust for a vlan header if present */
	if ( vlan )
		ip = (ipHdr_t *)((char *)ip + sizeof(struct vlan_hdr));

    // Look for a ICMP echo requests, but only if enabled.
    if ( (rte_atomic32_read(&info->port_flags) & ICMP_ECHO_ENABLE_FLAG) &&
    		(ip->proto == PG_IPPROTO_ICMP) ) {
#if !defined(RTE_ARCH_X86_64)
        icmpv4Hdr_t * icmp = (icmpv4Hdr_t *)((uint32_t)ip + sizeof(ipHdr_t));
#else
        icmpv4Hdr_t * icmp = (icmpv4Hdr_t *)((uint64_t)ip + sizeof(ipHdr_t));
#endif

        // We do not handle IP options, which will effect the IP header size.
        if ( unlikely(cksum(icmp, (m->data_len - sizeof(struct ether_hdr) - sizeof(ipHdr_t)), 0)) ) {
            pktgen_log_error("ICMP checksum failed");
            return;
        }

        if ( unlikely(icmp->type == ICMP4_ECHO) ) {
            /* Never answer pings to the broadcast address. */
            if ( ntohl(ip->dst) == INADDR_BROADCAST ) {
                pktgen_log_warning("IP address %s is a Broadcast",
                        inet_ntop4(buff, sizeof(buff), ip->dst, INADDR_BROADCAST));
                return;
            }

            // Toss all broadcast addresses and requests not for this port
            pkt = pktgen_find_matching_ipsrc(info, ip->dst);

            // ARP request not for this interface.
            if ( unlikely(pkt == NULL) ) {
                pktgen_log_warning("IP address %s not found",
                        inet_ntop4(buff, sizeof(buff), ip->dst, INADDR_BROADCAST));
                return;
            }

            info->stats.echo_pkts++;

            icmp->type  = ICMP4_ECHO_REPLY;

            /* Recompute the ICMP checksum */
            icmp->cksum = 0;
            icmp->cksum = cksum(icmp, (m->data_len - sizeof(struct ether_hdr) - sizeof(ipHdr_t)), 0);

            // Swap the IP addresses.
            inetAddrSwap(&ip->src, &ip->dst);

            // Bump the ident value
            ip->ident   = htons(ntohs(ip->ident) + m->data_len);

            // Recompute the IP checksum
            ip->cksum   = 0;
            ip->cksum   = cksum(ip, sizeof(ipHdr_t), 0);

            // Swap the MAC addresses
            ethAddrSwap(&eth->d_addr, &eth->s_addr);

            pktgen_send_mbuf(m, pid, 0);

            pktgen_set_q_flags(info, 0, DO_TX_FLUSH);

            // No need to free mbuf as it was reused.
            return;
        } else if ( unlikely(icmp->type == ICMP4_ECHO_REPLY) ) {
            info->stats.echo_pkts++;
        }
    }
}
Exemplo n.º 15
0
/*
 * Pick the lcore (restricted to 'cpumask' intersected with the app core
 * mask) that should host a new NVMf session reactor.
 *
 * Preference order: the first RUNNING lcore still below the per-lcore
 * session-connection target; otherwise an idle (WAIT/FINISHED) lcore;
 * otherwise the RUNNING lcore with the fewest pollers. Returns 0 when the
 * effective mask is empty.
 */
static int
nvmf_allocate_reactor(uint64_t cpumask)
{
	enum rte_lcore_state_t state;
	int core;
	int best_core = 0;
	int32_t best_pollers = INT_MAX;
	const int master_lcore = rte_get_master_lcore();

	cpumask &= spdk_app_get_core_mask();
	if (cpumask == 0) {
		return 0;
	}

	for (core = 0; core < RTE_MAX_LCORE; core++) {
		if (!((1ULL << core) & cpumask)) {
			continue;
		}

		/*
		 * DPDK returns WAIT for the master lcore instead of RUNNING.
		 * So we always treat the reactor on master core as RUNNING.
		 */
		state = (core == master_lcore) ? RUNNING
					       : rte_eal_get_lcore_state(core);

		/* Reap a finished lcore so it can be relaunched later. */
		if (state == FINISHED) {
			rte_eal_wait_lcore(core);
		}

		if (state == WAIT || state == FINISHED) {
			/* Idle cores have 0 pollers — remember the first one. */
			if (best_pollers > 0) {
				best_core = core;
				best_pollers = 0;
			}
		} else if (state == RUNNING) {
			/* This lcore is running, check how many pollers it already has */
			int32_t pollers = rte_atomic32_read(&g_num_connections[core]);

			if (pollers < (SPDK_NVMF_DEFAULT_NUM_SESSIONS_PER_LCORE *
				       g_nvmf_tgt.MaxConnectionsPerSession)) {
				/* Below the target number of session connections:
				 * place the new session here immediately.
				 */
				return core;
			}
			if (pollers < best_pollers) {
				/* Track the least-loaded lcore as a fallback. */
				best_core = core;
				best_pollers = pollers;
			}
		}
	}

	return best_core;
}
Exemplo n.º 16
0
/*
 * Process a received ARP packet for a port.
 *
 * ARP requests: gratuitous ARPs (when PROCESS_GARP_PKTS is set) update the
 * destination MAC of the matching sequence packet; ordinary requests for
 * one of the port's source IPs are converted in place into a reply and
 * retransmitted (the mbuf is reused, not freed). ARP replies update the
 * matching packet's destination MAC when the sender IP matches.
 */
void
pktgen_process_arp( struct rte_mbuf * m, uint32_t pid, uint32_t vlan )
{
    port_info_t   * info = &pktgen.info[pid];
    pkt_seq_t     * pkt;
    struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
    arpPkt_t      * arp = (arpPkt_t *)&eth[1];

	/* Adjust for a vlan header if present */
	if ( vlan )
		arp = (arpPkt_t *)((char *)arp + sizeof(struct vlan_hdr));

    // Process all ARP requests if they are for us.
    if ( arp->op == htons(ARP_REQUEST) ) {
		if ((rte_atomic32_read(&info->port_flags) & PROCESS_GARP_PKTS) &&
 			(arp->tpa._32 == arp->spa._32) ) {		/* Must be a GARP packet */

			pkt = pktgen_find_matching_ipdst(info, arp->spa._32);

			/* Found a matching packet, replace the dst address */
			if ( pkt ) {
				rte_memcpy(&pkt->eth_dst_addr, &arp->sha, 6);
				pktgen_set_q_flags(info, wr_get_txque(pktgen.l2p, rte_lcore_id(), pid), DO_TX_CLEANUP);
				pktgen_redisplay(0);
			}
			return;
		}

		pkt = pktgen_find_matching_ipsrc(info, arp->tpa._32);

		/* ARP request not for this interface. */
		if ( likely(pkt != NULL) ) {
			/* Grab the source MAC address as the destination address for the port. */
			if ( unlikely(pktgen.flags & MAC_FROM_ARP_FLAG) ) {
				uint32_t    i;

				rte_memcpy(&pkt->eth_dst_addr, &arp->sha, 6);
				/* Rebuild every sequence packet with the new MAC. */
				for (i = 0; i < info->seqCnt; i++)
					pktgen_packet_ctor(info, i, -1);
			}

			// Swap the two MAC addresses
			ethAddrSwap(&arp->sha, &arp->tha);

			// Swap the two IP addresses
			inetAddrSwap(&arp->tpa._32, &arp->spa._32);

			// Set the packet to ARP reply
			arp->op = htons(ARP_REPLY);

			// Swap the MAC addresses
			ethAddrSwap(&eth->d_addr, &eth->s_addr);

			// Copy in the MAC address for the reply.
			rte_memcpy(&arp->sha, &pkt->eth_src_addr, 6);
			rte_memcpy(&eth->s_addr, &pkt->eth_src_addr, 6);

			pktgen_send_mbuf(m, pid, 0);

			// Flush all of the packets in the queue.
			pktgen_set_q_flags(info, 0, DO_TX_FLUSH);

			// No need to free mbuf as it was reused
			return;
		}
	} else if ( arp->op == htons(ARP_REPLY) ) {
		pkt = pktgen_find_matching_ipsrc(info, arp->tpa._32);

		// ARP request not for this interface.
		if ( likely(pkt != NULL) ) {
			// Grab the real destination MAC address
			if ( pkt->ip_dst_addr == ntohl(arp->spa._32) )
				rte_memcpy(&pkt->eth_dst_addr, &arp->sha, 6);

			pktgen.flags |= PRINT_LABELS_FLAG;
		}
	}
}
Exemplo n.º 17
0
// Reconnect thread: every 20 seconds, walk the server hash and retry the
// TCP connection for any server currently marked disconnected. Registers
// itself in thread_num on entry and deregisters on exit; runs until
// keep_running is cleared or client_num drops to zero.
//
// NOTE(review): the slot spinlock is held across socket()/connect(), so a
// slow blocking connect stalls other users of that slot — confirm this is
// acceptable. Also, the EINPROGRESS check only matters for non-blocking
// sockets; these sockets are created blocking, so connect() either
// completes or fails here — verify whether O_NONBLOCK was intended.
void *reconnect_thread(void *arg) {
	int i;
	pthread_detach(pthread_self());
	rte_atomic32_inc(&thread_num);
	char ip[INET_ADDRSTRLEN] = {0};
	struct timespec req = {20, 0};
	struct in_addr addr4 = {0};
	while(rte_atomic32_read(&keep_running) && client_num > 0) {
		for(i=0; i<client_num; ++i) {
			slot_t *slot = &svr_hash.slots[i];
			svr_t *svr = (svr_t*)slot->data;
			pthread_spin_lock(&slot->lock);
			if(svr != NULL && svr->connected == 0) {
				// get server ip string from uint32_t
				addr4.s_addr = svr->ip;
				inet_ntop(AF_INET, &addr4, ip, INET_ADDRSTRLEN);
				// create socket
				int fd = socket(AF_INET, SOCK_STREAM, 0);
				if(fd < 0) {
#ifdef DEBUG_STDOUT
					printf("Failed to create socket for %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__);
#else
#endif
					pthread_spin_unlock(&slot->lock);
					continue;
				}
				// fd doubles as the index into sockinfo[], so it must fit
				if(fd >= DESCRIPTOR_MAX) {
#ifdef DEBUG_STDOUT
					printf("Too many connections %d/%d, %s, %s, %d\n", fd, DESCRIPTOR_MAX, __FUNCTION__, __FILE__, __LINE__);
#else
#endif
					close(fd);
					pthread_spin_unlock(&slot->lock);
					exit(EXIT_FAILURE);
				}
				// connect to server
				struct sockaddr_in addr;
				memset(&addr, 0, sizeof addr);
				addr.sin_family = AF_INET;
				addr.sin_port = htons(svr->port);
				addr.sin_addr.s_addr = svr->ip;
				if(connect(fd, (struct sockaddr*)&addr, sizeof addr) < 0) {
					if(errno != EINPROGRESS) {
#ifdef DEBUG_STDOUT
						printf("Failed to connect to %s:%d, %s, %s, %d\n", ip, svr->port, __FUNCTION__, __FILE__, __LINE__);
#endif
						close(fd);
						pthread_spin_unlock(&slot->lock);
						continue;
					}
				}
				svr->connected = 1;
				// add to fd manager
				sockinfo[fd].fd = fd;
				sockinfo[fd].ip = svr->ip;
				sockinfo[fd].type = TYPE_SERVER;
			}
			pthread_spin_unlock(&slot->lock);
		}
		nanosleep(&req, NULL);
	}
	rte_atomic32_dec(&thread_num);
	return NULL;
}
Exemplo n.º 18
0
/*
 * I/O lcore main loop: dispatches to one of three poll loops depending on
 * whether this lcore owns RX queues, TX ports, or both.
 *
 * Each loop polls continuously and, every APP_LCORE_IO_FLUSH iterations,
 * checks the global `dpdk_stop` flag (exiting when it is set) and flushes
 * the pending RX/TX buffers. On exit, any TX state is cleaned up.
 */
void
app_lcore_main_loop_io(void *arg) {
  uint32_t lcore = rte_lcore_id();
  struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
  uint32_t n_workers = app_get_lcores_worker();
  uint64_t i = 0;  /* iterations since last flush/stop check */

  uint32_t bsz_rx_rd = app.burst_size_io_rx_read;
  uint32_t bsz_rx_wr = app.burst_size_io_rx_write;
  uint32_t bsz_tx_rd = app.burst_size_io_tx_read;
  uint32_t bsz_tx_wr = app.burst_size_io_tx_write;

  if (lp->rx.n_nic_queues > 0 && lp->tx.n_nic_ports == 0) {
    /* receive loop */
    for (;;) {
      if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) {
        if (rte_atomic32_read(&dpdk_stop) != 0) {
          break;
        }
        app_lcore_io_rx_flush(lp, n_workers);
        i = 0;
      }
      app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr);
      i++;
    }
  } else if (lp->rx.n_nic_queues == 0 && lp->tx.n_nic_ports > 0) {
    /* transmit loop */
    for (;;) {
      if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) {
        if (rte_atomic32_read(&dpdk_stop) != 0) {
          break;
        }
        app_lcore_io_tx_flush(lp, arg);
        i = 0;
      }
      app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr);
#ifdef __linux__
      app_lcore_io_tx_kni(lp, bsz_tx_wr);
#endif /* __linux__ */
      i++;
    }
  } else {
    /* combined RX + TX loop */
    for (;;) {
      if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) {
        if (rte_atomic32_read(&dpdk_stop) != 0) {
          break;
        }
        app_lcore_io_rx_flush(lp, n_workers);
        app_lcore_io_tx_flush(lp, arg);
        i = 0;
      }
      app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr);
      app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr);
#ifdef __linux__
      app_lcore_io_tx_kni(lp, bsz_tx_wr);
#endif /* __linux__ */
      i++;
    }
  }
  /* cleanup */
  if (likely(lp->tx.n_nic_ports > 0)) {
    app_lcore_io_tx_cleanup(lp);
  }
}
Exemplo n.º 19
0
/*
 * Parse a received mbuf as Ethernet/IPv4/UDP carrying a Paxos header and
 * dispatch it to the proposer state machine.
 *
 * Returns 0 after dispatching (ret is never modified), or -1 when the packet
 * is not UDP or is not addressed to the proposer/learner ports. On the
 * success path the outer headers (L2+L3+UDP+Paxos) are stripped from the
 * mbuf with rte_pktmbuf_adj().
 *
 * NOTE(review): `v` from paxos_value_new() is copied by value into the
 * message structs and never freed on any path (including the default case)
 * — this looks like a per-packet leak. Whether the handlers take ownership
 * of v's inner buffer is not visible here, so confirm against the libpaxos
 * API before adding a free.
 */
static int
paxos_rx_process(struct rte_mbuf *pkt, struct proposer* proposer)
{
    int ret = 0;
    uint8_t l4_proto = 0;
    uint16_t outer_header_len;
    union tunnel_offload_info info = { .data = 0 };
    struct udp_hdr *udp_hdr;
    struct paxos_hdr *paxos_hdr;
    struct ether_hdr *phdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);

    parse_ethernet(phdr, &info, &l4_proto);

    if (l4_proto != IPPROTO_UDP)
        return -1;

    udp_hdr = (struct udp_hdr *)((char *)phdr +
                                 info.outer_l2_len + info.outer_l3_len);

    /* if UDP dst port is not either PROPOSER or LEARNER port */
    if (!(udp_hdr->dst_port == rte_cpu_to_be_16(PROPOSER_PORT) ||
            udp_hdr->dst_port == rte_cpu_to_be_16(LEARNER_PORT)) &&
            (pkt->packet_type & RTE_PTYPE_TUNNEL_MASK) == 0)
        return -1;

    paxos_hdr = (struct paxos_hdr *)((char *)udp_hdr + sizeof(struct udp_hdr));

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        //rte_hexdump(stdout, "udp", udp_hdr, sizeof(struct udp_hdr));
        //rte_hexdump(stdout, "paxos", paxos_hdr, sizeof(struct paxos_hdr));
        print_paxos_hdr(paxos_hdr);
    }

    int value_len = rte_be_to_cpu_16(paxos_hdr->value_len);
    struct paxos_value *v = paxos_value_new((char *)paxos_hdr->paxosval, value_len);
    /* All fields arrive big-endian on the wire; convert before dispatch. */
    switch(rte_be_to_cpu_16(paxos_hdr->msgtype)) {
    case PAXOS_PROMISE: {
        struct paxos_promise promise = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_promise(proposer, &promise);
        break;
    }
    case PAXOS_ACCEPT: {
        /* Pre-execute once, lazily, on the first ACCEPT seen. */
        if (first_time) {
            proposer_preexecute(proposer);
            first_time = false;
        }
        struct paxos_accept acpt = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accept(proposer, &acpt);
        break;
    }
    case PAXOS_ACCEPTED: {
        struct paxos_accepted ack = {
            .iid = rte_be_to_cpu_32(paxos_hdr->inst),
            .ballot = rte_be_to_cpu_16(paxos_hdr->rnd),
            .value_ballot = rte_be_to_cpu_16(paxos_hdr->vrnd),
            .aid = rte_be_to_cpu_16(paxos_hdr->acptid),
            .value = *v
        };
        proposer_handle_accepted(proposer, &ack);
        break;
    }
    default:
        break;
    }
    /* Strip everything up to and including the Paxos header. */
    outer_header_len = info.outer_l2_len + info.outer_l3_len
                       + sizeof(struct udp_hdr) + sizeof(struct paxos_hdr);

    rte_pktmbuf_adj(pkt, outer_header_len);

    return ret;

}

/*
 * RX callback: stamp each mbuf in the burst with the current TSC and hand
 * it to the Paxos processing path. Always returns the burst size unchanged.
 */
static uint16_t
add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
               struct rte_mbuf **pkts, uint16_t nb_pkts,
               uint16_t max_pkts __rte_unused, void *user_param)
{
    struct proposer *prop = (struct proposer *)user_param;
    const uint64_t tsc_now = rte_rdtsc();
    uint16_t idx;

    for (idx = 0; idx < nb_pkts; idx++) {
        pkts[idx]->udata64 = tsc_now;
        paxos_rx_process(pkts[idx], prop);
    }
    return nb_pkts;
}


/*
 * Initialise one Ethernet port: one RX and one TX queue, promiscuous mode,
 * and install the Paxos RX timestamp callback and TX latency callback.
 *
 * Returns 0 on success, -1 for an out-of-range port id, otherwise the
 * negative error code of the failing rte_eth_* call.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool, struct proposer* proposer)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf *txconf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_conf port_conf = port_conf_default;
    const uint16_t rx_rings = 1, tx_rings = 1;
    int retval;
    uint16_t q;

    /* Fix: validate the port id BEFORE touching the device; the original
     * called rte_eth_dev_info_get() on a potentially invalid port first. */
    if (port >= rte_eth_dev_count())
        return -1;

    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;

    /* NOTE(review): PKT_TX_* are mbuf offload flags, not ETH_TXQ_FLAGS_*
     * values, so masking txq_flags with them looks suspect — confirm the
     * intended TX offload configuration. Behavior kept as-is. */
    txconf->txq_flags &= PKT_TX_IPV4;
    txconf->txq_flags &= PKT_TX_UDP_CKSUM;

    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0)
        return retval;

    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), rxconf, mbuf_pool);
        if (retval < 0)
            return retval;
    }

    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                                        rte_eth_dev_socket_id(port), txconf);
        if (retval < 0)
            return retval;
    }

    retval = rte_eth_dev_start(port);
    if (retval < 0)
        return retval;

    struct ether_addr addr;
    rte_eth_macaddr_get(port, &addr);
    rte_eth_promiscuous_enable(port);

    /* Stamp + process on RX, measure latency on TX. */
    rte_eth_add_rx_callback(port, 0, add_timestamps, proposer);
    rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
    return 0;
}


static void
lcore_main(uint8_t port, __rte_unused struct proposer *p)
{
    proposer_preexecute(p);

    for (;;) {
        // Check if signal is received
        if (force_quit)
            break;
        struct rte_mbuf *bufs[BURST_SIZE];
        const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs, BURST_SIZE);
        if (unlikely(nb_rx == 0))
            continue;
        uint16_t buf;
        for (buf = 0; buf < nb_rx; buf++)
            rte_pktmbuf_free(bufs[buf]);
    }
}



/*
 * Slave-lcore loop: polls the TSC and runs rte_timer_manage() roughly every
 * TIMER_RESOLUTION_CYCLES. Never returns.
 */
static __attribute__((noreturn)) int
lcore_mainloop(__attribute__((unused)) void *arg)
{
    uint64_t last_manage = 0;
    unsigned core = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_TIMER,
            "Starting mainloop on core %u\n", core);

    for (;;) {
        const uint64_t now = rte_rdtsc();
        if (now - last_manage > TIMER_RESOLUTION_CYCLES) {
            rte_timer_manage();
            last_manage = now;
        }
    }
}

/*
 * Periodic timer callback: report the messages-per-second counter and reset
 * it. The timer is PERIODICAL, so it re-arms itself until force_quit stops it.
 *
 * Fix: `__attribute((unused))` normalized to the documented GCC spelling
 * `__attribute__((unused))`.
 */
static void
report_stat(struct rte_timer *tim, __attribute__((unused)) void *arg)
{
    /* print stat */
    uint32_t count = rte_atomic32_read(&stat);
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER8,
            "Throughput = %8u msg/s\n", count);
    /* reset stat */
    rte_atomic32_set(&stat, 0);
    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}


/*
 * Periodic timer callback: walk the proposer's timeout iterator and resend
 * any PREPARE and ACCEPT messages that have timed out. The timer is
 * PERIODICAL and re-arms itself until force_quit stops it.
 *
 * Fix: the ACCEPT loop's debug log read `out.u.prepare.*` instead of
 * `out.u.accept.*` — wrong union member for the message being sent.
 */
static void
check_timeout(struct rte_timer *tim, void *arg)
{
    struct proposer* p = (struct proposer *) arg;
    unsigned lcore_id = rte_lcore_id();

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "%s() on lcore_id %i\n", __func__, lcore_id);

    struct paxos_message out;
    out.type = PAXOS_PREPARE;
    struct timeout_iterator* iter = proposer_timeout_iterator(p);
    while(timeout_iterator_prepare(iter, &out.u.prepare)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s Send PREPARE inst %d ballot %d\n",
                __func__, out.u.prepare.iid, out.u.prepare.ballot);
        send_paxos_message(&out);
    }
    out.type = PAXOS_ACCEPT;
    while(timeout_iterator_accept(iter, &out.u.accept)) {
        rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8,
                "%s: Send ACCEPT inst %d ballot %d\n",
                __func__, out.u.accept.iid, out.u.accept.ballot);
        send_paxos_message(&out);
    }
    timeout_iterator_free(iter);

    /* this timer is automatically reloaded until we decide to stop it */
    if (force_quit)
        rte_timer_stop(tim);
}

/*
 * Entry point: set up EAL, the RTE timer subsystem, the per-lcore timeout
 * and statistics timers, the mbuf pool, and one Ethernet port, then run the
 * master-lcore RX loop until a SIGINT/SIGTERM sets force_quit.
 *
 * Fix: rte_timer_subsystem_init() was previously called AFTER
 * rte_timer_init()/rte_timer_reset(); the timer subsystem must be
 * initialised before any other rte_timer API is used, so it is now called
 * immediately after EAL init.
 */
int
main(int argc, char *argv[])
{
    uint8_t portid = 0;
    unsigned master_core, lcore_id;
    signal(SIGTERM, signal_handler);
    signal(SIGINT, signal_handler);
    force_quit = false;
    int proposer_id = 0;

    if (rte_get_log_level() == RTE_LOG_DEBUG) {
        paxos_config.verbosity = PAXOS_LOG_DEBUG;
    }

    struct proposer *proposer = proposer_new(proposer_id, NUM_ACCEPTORS);
    first_time = true;
    /* init EAL */
    int ret = rte_eal_init(argc, argv);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    /* init RTE timer library — must precede all other rte_timer calls */
    rte_timer_subsystem_init();

    /* init timer structures */
    rte_timer_init(&timer);
    rte_timer_init(&stat_timer);

    /* load deliver_timer, every 1 s, on a slave lcore, reloaded automatically */
    uint64_t hz = rte_get_timer_hz();

    /* Call rte_timer_manage every 10ms */
    TIMER_RESOLUTION_CYCLES = hz / 100;
    rte_log(RTE_LOG_INFO, RTE_LOGTYPE_USER1, "Clock: %"PRIu64"\n", hz);

    /* master core */
    master_core = rte_lcore_id();
    /* slave core: runs check_timeout every second */
    lcore_id = rte_get_next_lcore(master_core, 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&timer, hz, PERIODICAL, lcore_id, check_timeout, proposer);
    /* launch the timer-manage loop on the slave core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    /* stat core: runs report_stat every second */
    lcore_id = rte_get_next_lcore(lcore_id , 0, 1);
    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER1, "lcore_id: %d\n", lcore_id);
    rte_timer_reset(&stat_timer, hz, PERIODICAL, lcore_id,
                    report_stat, NULL);

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
                                        NUM_MBUFS, MBUF_CACHE_SIZE, 0,
                                        RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf_pool\n");
    /* launch the timer-manage loop on the stat core */
    rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);

    if (port_init(portid, mbuf_pool, proposer) != 0)
        rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);


    lcore_main(portid, proposer);

    rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_USER8, "Free proposer\n");
    proposer_free(proposer);
    return 0;
}