Example no. 1
File: init.c Project: emmericp/dpdk
static void
app_init_kni(struct app_params *app) {
	uint32_t i;

	if (app->n_pktq_kni == 0)
		return;

	rte_kni_init(app->n_pktq_kni);

	for (i = 0; i < app->n_pktq_kni; i++) {
		struct app_pktq_kni_params *p_kni = &app->kni_params[i];
		struct app_link_params *p_link;
		struct rte_eth_dev_info dev_info;
		struct app_mempool_params *mempool_params;
		struct rte_mempool *mempool;
		struct rte_kni_conf conf;
		struct rte_kni_ops ops;

		/* LINK */
		p_link = app_get_link_for_kni(app, p_kni);
		memset(&dev_info, 0, sizeof(dev_info));
		rte_eth_dev_info_get(p_link->pmd_id, &dev_info);

		/* MEMPOOL */
		mempool_params = &app->mempool_params[p_kni->mempool_id];
		mempool = app->mempool[p_kni->mempool_id];

		/* KNI */
		memset(&conf, 0, sizeof(conf));
		snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
		conf.force_bind = p_kni->force_bind;
		if (conf.force_bind) {
			int lcore_id;

			lcore_id = cpu_core_map_get_lcore_id(app->core_map,
				p_kni->socket_id,
				p_kni->core_id,
				p_kni->hyper_th_id);

			if (lcore_id < 0)
				rte_panic("%s invalid CPU core\n", p_kni->name);

			conf.core_id = (uint32_t) lcore_id;
		}
		conf.group_id = p_link->pmd_id;
		conf.mbuf_size = mempool_params->buffer_size;
		conf.addr = dev_info.pci_dev->addr;
		conf.id = dev_info.pci_dev->id;

		memset(&ops, 0, sizeof(ops));
		ops.port_id = (uint8_t) p_link->pmd_id;
		ops.change_mtu = kni_change_mtu;
		ops.config_network_if = kni_config_network_interface;

		APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
		app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
		if (!app->kni[i])
			rte_panic("%s init error\n", p_kni->name);
	}
}
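Note: every snippet in this collection follows the same core pattern: zero a struct rte_eth_dev_info, fill it with rte_eth_dev_info_get(), then read the capability fields (driver_name, max_rx_queues, pci_dev, ...) before touching the port. A minimal, self-contained sketch of just that pattern (the helper name is ours; the API is the DPDK 17.x-era one used throughout these examples):

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: print a port's basic capabilities. */
static void print_port_caps(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);

	printf("port %u: driver=%s max_rxq=%u max_txq=%u\n",
	       port_id, dev_info.driver_name,
	       dev_info.max_rx_queues, dev_info.max_tx_queues);
}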
Example no. 2
int
kni_alloc(uint8_t port_id, struct rte_mempool* pktmbuf_pool)
{
	uint8_t i;
	struct rte_kni* kni;
	struct rte_kni_conf conf;
	struct kni_port_params** params = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
		return -1;

	params[port_id]->nb_kni =
	    params[port_id]->nb_lcore_k ? params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
		if (params[port_id]->nb_lcore_k > 1) {
			snprintf(conf.name, RTE_KNI_NAMESIZE, "dpdk%u_%u",
				 port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			conf.force_bind = 1;
		} else
			snprintf(conf.name, RTE_KNI_NAMESIZE, "dpdk%u",
				 port_id);
		conf.group_id = (uint16_t)port_id;
		conf.mbuf_size = MAX_PACKET_SZ;
		/*
		 * The first KNI device associated to a port
		 * is the master, for multiple kernel thread
		 * environment.
		 */
		if (i == 0) {
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;

			memset(&dev_info, 0, sizeof(dev_info));
			rte_eth_dev_info_get(port_id, &dev_info);
			conf.addr = dev_info.pci_dev->addr;
			conf.id = dev_info.pci_dev->id;

			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;

			kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
		} else
			kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

		if (!kni)
			rte_exit(EXIT_FAILURE,
				 "Failed to create KNI for "
				 "port: %d\n",
				 port_id);
		params[port_id]->kni[i] = kni;
	}

	return 0;
}
Example no. 3
char* pcap_lookupdev(char* errbuf)
{
    int    port  = 0;
    struct rte_eth_dev_info info;

    if (globalInit(errbuf) != DPDKPCAP_OK)
    {
        return NULL;
    }

    int portsNumber = rte_eth_dev_count();
    if (portsNumber < 1)
    {
        snprintf (errbuf, PCAP_ERRBUF_SIZE, "No devices found");
        return NULL;
    }

    if (deviceInit(port, errbuf) == DPDKPCAP_FAILURE)
    {
        return NULL;
    }

    rte_eth_dev_info_get(port, &info);

    snprintf(ifName, DPDKPCAP_IF_NAMESIZE, "enp%us%u",
             info.pci_dev->addr.bus,
             info.pci_dev->addr.devid);

    deviceNames[port] = ifName;

    return ifName;
}
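The name synthesized above mimics the systemd "predictable network interface name" scheme (enp<bus>s<slot>). For example, with a NIC at PCI bus 3, device 0 the call reduces to:

/* info.pci_dev->addr.bus == 3, info.pci_dev->addr.devid == 0 */
snprintf(ifName, DPDKPCAP_IF_NAMESIZE, "enp%us%u", 3, 0); /* -> "enp3s0" */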
Example no. 4
int
tcpreplay_netport_init(struct arguments *args)
{   
    int ret;
    uint8_t rss_key [40];
    struct rte_eth_link link;
    struct rte_eth_dev_info dev_info;
    struct rte_eth_rss_conf rss_conf;
    struct rte_eth_fdir fdir_conf;
    
    /* NOTE: the port index 'i', port_conf, rx_conf, tx_conf and pktmbuf_pool
       are globals defined elsewhere in this project's source. */
    /* Retrieve and print device info */
    rte_eth_dev_info_get(i, &dev_info);
    printf("Name:%s\n\tDriver name: %s\n\tMax rx queues: %d\n\tMax tx queues: %d\n", dev_info.pci_dev->driver->name,dev_info.driver_name, dev_info.max_rx_queues, dev_info.max_tx_queues);
    printf("\tPCI Adress: %04d:%02d:%02x:%01d\n", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);
    
    /* Configure device with 1 RX queue and 1 TX queue */
    ret = rte_eth_dev_configure(i, 1, 1, &port_conf);
    if (ret < 0) rte_panic("Error configuring the port\n");
    
    /* For each RX queue in each NIC */
    /* Configure rx queue j of current device on current NUMA socket. It takes elements from the mempool */
    ret = rte_eth_rx_queue_setup(i, 0, RX_QUEUE_SZ, rte_socket_id(), &rx_conf, pktmbuf_pool);
    if (ret < 0) FATAL_ERROR("Error configuring receiving queue\n");
    /* Configure mapping [queue] -> [element in stats array] */
    ret = rte_eth_dev_set_rx_queue_stats_mapping(i, 0, 0);
    if (ret < 0) FATAL_ERROR("Error configuring receiving queue stats\n");
    
    
    /* Configure tx queue of current device on current NUMA socket. Mandatory configuration even if you want only rx packet */
    ret = rte_eth_tx_queue_setup(i, 0, TX_QUEUE_SZ, rte_socket_id(), &tx_conf);
    if (ret < 0) FATAL_ERROR("Error configuring transmitting queue. Errno: %d (%d bad arg, %d no mem)\n", -ret, EINVAL ,ENOMEM);
    
    /* Start device */      
    ret = rte_eth_dev_start(i);
    if (ret < 0) FATAL_ERROR("Cannot start port\n");
    
    /* Enable receipt in promiscuous mode for an Ethernet device */
    rte_eth_promiscuous_enable(i);
    
    /* Print link status */
    rte_eth_link_get_nowait(i, &link);
    if (link.link_status)
        printf("\tPort %d Link Up - speed %u Mbps - %s\n", (uint8_t)i,
               (unsigned)link.link_speed,
               (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? "full-duplex" : "half-duplex");
    else
        printf("\tPort %d Link Down\n", (uint8_t)i);
    
    /* Print RSS support. Not fully reliable: a NIC may accept RSS configuration
       through rte_eth_dev_configure() without supporting rte_eth_dev_rss_hash_conf_get(). */
    rss_conf.rss_key = rss_key;
    ret = rte_eth_dev_rss_hash_conf_get (i,&rss_conf);
    if (ret == 0) printf("\tDevice supports RSS\n"); else printf("\tDevice DOES NOT support RSS\n");
    
    /* Print Flow director support */
    ret = rte_eth_dev_fdir_get_infos (i, &fdir_conf);
    if (ret == 0) printf("\tDevice supports Flow Director\n"); else printf("\tDevice DOES NOT support Flow Director\n"); 

    (void)args; /* args is currently unused; the function always returns 1 */
    return 1;
}
Example no. 5
int
vfd_mlx5_get_ifname(uint16_t port_id, char *ifname)
{
	struct rte_eth_dev_info dev_info;
	rte_eth_dev_info_get(port_id, &dev_info);
	
	if (!if_indextoname(dev_info.if_index, ifname))
		return -1;

	return 0;
}
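dev_info.if_index is only meaningful for PMDs backed by a kernel netdev (as mlx5 is); if_indextoname() comes from <net/if.h>. A usage sketch (the port number and error handling are ours):

#include <net/if.h> /* if_indextoname(), IF_NAMESIZE */

char name[IF_NAMESIZE];
if (vfd_mlx5_get_ifname(0, name) == 0)
	printf("port 0 maps to kernel interface %s\n", name);
else
	printf("port 0 has no kernel interface\n");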
Example no. 6
/* Alloc KNI Devices for PORT_ID */
static int odp_kni_alloc(uint8_t port_id)
{
	uint8_t i;
	struct rte_kni *kni;
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;

	unsigned lcore_id = params[port_id]->lcore_id;
	unsigned lcore_socket = rte_lcore_to_socket_id(lcore_id);
	struct rte_mempool * kni_mempool = odp_pktmbuf_pool[lcore_socket];

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
		return -1;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE,"keth%u", port_id);
	conf.group_id = (uint16_t)port_id;
	conf.mbuf_size = MAX_PACKET_SZ;

	struct rte_kni_ops ops;
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	conf.addr = dev_info.pci_dev->addr;
	conf.id = dev_info.pci_dev->id;

	memset(&ops, 0, sizeof(ops));
	ops.port_id = port_id;
	ops.change_mtu = kni_change_mtu;
	ops.config_network_if = kni_config_network_interface;

	kni = rte_kni_alloc(kni_mempool, &conf, &ops);

	if (!kni)
		rte_exit(EXIT_FAILURE, "Fail to create kni for "
						"port: %d\n", port_id);

	params[port_id]->kni = kni;

	/* Create ring to receive the pkts from other cores */
	char ring_name[32];
	snprintf(ring_name,sizeof(ring_name),"kni_ring_s%u_p%u",lcore_socket,port_id);

	params[port_id]->ring = rte_ring_create(ring_name,ODP_KNI_RING_SIZE,
						lcore_socket,RING_F_SC_DEQ);
	
	if(!params[port_id]->ring)
		rte_exit(EXIT_FAILURE, "Fail to create ring for kni %s",ring_name);

	return 0;
}
Example no. 7
/**
 * @brief           Retrieve device information
 * 
 * @param devId     uint8_t, ID of DPDK device
 * @param info      EthDevInfo_t*, pointer to buffer where device info will be stored
 *
 * @return          EthDevInfo_t* if success and NULL otherwise
 */
EthDevInfo_t* DPDKAdapter::getDevInfo(uint8_t devId, EthDevInfo_t* info)
{
    if(devId >= RTE_MAX_ETHPORTS) /* valid IDs are 0..RTE_MAX_ETHPORTS-1 */
    {
        qCritical("Device ID is out of range");
        return NULL;
    }

    memset(info, 0, sizeof(EthDevInfo_t));
    rte_eth_dev_info_get(devId, info);
    
    return info;
}
Example no. 8
int 
rw_piot_get_device_info(rw_piot_api_handle_t api_handle,
                        struct rte_eth_dev_info *dev_info)
{
  rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);
  ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));
  if (NULL == rw_piot_dev) {
    RW_PIOT_LOG(RTE_LOG_ERR, "PIOT Could not find device by handle\n");
    return -1;
  }
  ASSERT(dev_info);
  rte_eth_dev_info_get(rw_piot_dev->rte_port_id, dev_info);
  return 0;
}
Example no. 9
/* initialize rte devices and check the number of available ports */
static uint8_t init_rte_dev(void)
{
	uint8_t nb_ports;
	struct rte_eth_dev_info dev_info;


	/* initialize driver(s) */
	TGEN_PANIC(rte_ixgbe_pmd_init() < 0, "\tError: Cannot init ixgbe pmd\n");

	if (tgen_cfg.flags & TGSF_USE_VF) {
		TGEN_PANIC(rte_ixgbevf_pmd_init() < 0, "\tError: cannot init ixgbevf pmd\n");
	}

	TGEN_PANIC(rte_eal_pci_probe() < 0, "\tError: Cannot probe PCI\n");

	/* get available ports configuration */
	nb_ports = rte_eth_dev_count();
	TGEN_PANIC(nb_ports == 0, "\tError: DPDK could not find any port\n");
	mprintf("\tDPDK has found %u ports\n", nb_ports);

	if (nb_ports > TGEN_MAX_PORTS) {
		mprintf("\tWarning: I can deal with at most %u ports."
		        " Please update TGEN_MAX_PORTS and recompile.\n", TGEN_MAX_PORTS);

		nb_ports = TGEN_MAX_PORTS;
	}

	TGEN_PANIC(tgen_used_port_mask & ~((1U << nb_ports) - 1),
	           "\tError: invalid port(s) specified, used port mask is %#10x\n", tgen_used_port_mask);

	/* read max TX queues per port */
	for (uint8_t port_id = 0; port_id < nb_ports; ++port_id) {
		/* skip ports that are not enabled */
		if ((tgen_used_port_mask & (1U << port_id)) == 0) {
			continue;
		}
		rte_eth_dev_info_get(port_id, &dev_info);
		tgen_port_conf[port_id].max_tx_queue = dev_info.max_tx_queues;
		mprintf("\tPort %u, Max TX queue = %u\n", port_id, dev_info.max_tx_queues);
		if (strcmp(dev_info.driver_name, "rte_ixgbe_pmd") == 0) {
			tgen_port_conf[port_id].type = PORT_IXGBE;
		}
		else {
			tgen_port_conf[port_id].type = PORT_IGB;
		}
	}

	return nb_ports;
}
Example no. 10
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    retval = rte_eth_dev_set_mtu(port->id, mtu);
    if (retval) {
        uint16_t actual_mtu;
        RTE_LOG(WARNING, EAL, "%d: Could not set mtu to %d\n", retval, mtu);
        rte_eth_dev_get_mtu(port->id, &actual_mtu);
        RTE_LOG(WARNING, EAL, "Current mtu=%d\n", actual_mtu);
        mtu = actual_mtu;
    }

    // Require checksum offloads
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get(port->id, &dev_info);
    uint64_t rx_offloads = DEV_RX_OFFLOAD_IPV4_CKSUM;
    uint64_t tx_offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;
    if ((dev_info.rx_offload_capa & rx_offloads) != rx_offloads) {
        RTE_LOG(WARNING, EAL, "%d: Only supports RX offloads 0x%0llx\n", port->id, dev_info.rx_offload_capa);
        rte_exit(EXIT_FAILURE, "Missing required RX offloads\n");
    }
    if ((dev_info.tx_offload_capa & tx_offloads) != tx_offloads) {
        RTE_LOG(WARNING, EAL, "%d: Only supports TX offloads 0x%0llx\n", port->id, dev_info.tx_offload_capa);
        rte_exit(EXIT_FAILURE, "Missing required TX offloads\n");
    }

    struct rte_eth_conf port_conf = {
        .rxmode = {
            .offloads = rx_offloads | DEV_RX_OFFLOAD_JUMBO_FRAME,
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
            .ignore_offload_bitfield = 0,
        },
        .txmode = {
            .offloads = tx_offloads,
        }
    };
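The snippet is truncated here in the source. A typical continuation, sketched under the assumption of a single RX/TX queue pair (ring sizes and exact calls are ours, not the original author's):

    /* Hypothetical continuation: apply port_conf, set up one RX and one TX
     * queue, then start the port. */
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, 512,
                                    rte_eth_dev_socket_id(port->id),
                                    NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, 512,
                                    rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        return retval;

    return rte_eth_dev_start(port->id);
}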
Example no. 11
void
app_init_kni(void) {
  uint8_t portid;

  for (portid = 0; portid < rte_eth_dev_count(); portid++) {
    struct rte_kni *kni;
    struct rte_kni_ops ops;
    struct rte_kni_conf conf;
    struct rte_eth_dev_info dev_info;

    continue; /* XXX: KNI setup disabled; the code below is currently skipped */
    /* Clear conf at first */
    memset(&conf, 0, sizeof(conf));
    memset(&dev_info, 0, sizeof(dev_info));
    memset(&ops, 0, sizeof(ops));
    snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", portid);
    conf.group_id = (uint16_t)portid;
    conf.mbuf_size = MAX_PACKET_SZ;
    rte_eth_dev_info_get(portid, &dev_info);
    conf.addr = dev_info.pci_dev->addr;
    conf.id = dev_info.pci_dev->id;
    ops.port_id = portid;
    ops.change_mtu = kni_change_mtu;
    ops.config_network_if = kni_config_network_interface;
    /* XXX socket 0 */
    kni = rte_kni_alloc(app.pools[0], &conf, &ops);
    if (kni == NULL) {
      lagopus_msg_error("Fail to create kni dev for port: %d\n", portid);
      continue;
    }
    lagopus_kni[portid] = kni;
    printf("KNI: %s is configured.\n", conf.name);

    printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
           (unsigned) portid,
           lagopus_ports_eth_addr[portid].addr_bytes[0],
           lagopus_ports_eth_addr[portid].addr_bytes[1],
           lagopus_ports_eth_addr[portid].addr_bytes[2],
           lagopus_ports_eth_addr[portid].addr_bytes[3],
           lagopus_ports_eth_addr[portid].addr_bytes[4],
           lagopus_ports_eth_addr[portid].addr_bytes[5]);

    /* initialize port stats */
    memset(&port_statistics, 0, sizeof(port_statistics));
  }
}
Example no. 12
/* Update device info */
static void
dpdk_ethdev_info_update(struct vr_dpdk_ethdev *ethdev)
{
    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(ethdev->ethdev_port_id, &dev_info);

    ethdev->ethdev_nb_rx_queues = RTE_MIN(dev_info.max_rx_queues,
        VR_DPDK_MAX_NB_RX_QUEUES);
    /* [PACKET_ID..FWD_ID) lcores have just TX queues, so we increase
     * the number of TX queues here */
    ethdev->ethdev_nb_tx_queues = RTE_MIN(RTE_MIN(dev_info.max_tx_queues,
        vr_dpdk.nb_fwd_lcores + (VR_DPDK_FWD_LCORE_ID - VR_DPDK_PACKET_LCORE_ID)),
        VR_DPDK_MAX_NB_TX_QUEUES);

    /* Check if we have dedicated an lcore for SR-IOV VF IO. */
    if (vr_dpdk.vf_lcore_id) {
        ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_tx_queues = 1;
    }

    ethdev->ethdev_nb_rss_queues = RTE_MIN(RTE_MIN(ethdev->ethdev_nb_rx_queues,
        vr_dpdk.nb_fwd_lcores), VR_DPDK_MAX_NB_RSS_QUEUES);
    ethdev->ethdev_reta_size = RTE_MIN(dev_info.reta_size,
        VR_DPDK_MAX_RETA_SIZE);

    RTE_LOG(DEBUG, VROUTER, "dev_info: driver_name=%s if_index=%u"
            " max_rx_queues=%" PRIu16 " max_tx_queues=%" PRIu16
            " max_vfs=%" PRIu16 " max_vmdq_pools=%" PRIu16
            " rx_offload_capa=%" PRIx32 " tx_offload_capa=%" PRIx32 "\n",
            dev_info.driver_name, dev_info.if_index,
            dev_info.max_rx_queues, dev_info.max_tx_queues,
            dev_info.max_vfs, dev_info.max_vmdq_pools,
            dev_info.rx_offload_capa, dev_info.tx_offload_capa);

#if !VR_DPDK_USE_HW_FILTERING
    /* use RSS queues only */
    ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#else
    /* we use just RSS queues if the device does not support RETA */
    if (ethdev->ethdev_reta_size == 0)
        ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#endif

    return;
}
Example no. 13
int rw_piot_is_virtio(rw_piot_api_handle_t api_handle)
{
 rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);
 struct rte_eth_dev_info dev_info;
 
 memset(&dev_info, 0, sizeof(dev_info));

 ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));
 if (NULL == rw_piot_dev ){
   return 0;
 }
 rte_eth_dev_info_get(rw_piot_dev->rte_port_id, &dev_info);
 if (strstr(dev_info.driver_name, "rte_virtio_pmd")){
   return 1;
 }
 
 return 0;
}
Example no. 14
File: kni.c Project: adolia/MoonGen
//struct rte_kni * mg_create_kni(uint8_t port_id, uint8_t core_id, struct rte_mempool* pktmbuf_pool){
struct rte_kni * mg_create_kni(uint8_t port_id, uint8_t core_id, void* mempool_ptr, const char name[]){
  struct rte_kni *kni;
  struct rte_kni_conf conf;

  /* Clear conf at first */
  memset(&conf, 0, sizeof(conf));
  snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
  conf.core_id = core_id;
  conf.force_bind = 1;
  conf.group_id = (uint16_t)port_id;
  conf.mbuf_size = MAX_PACKET_SZ;

  struct rte_eth_dev_info dev_info;
  memset(&dev_info, 0, sizeof(dev_info));
  rte_eth_dev_info_get(port_id, &dev_info);
  conf.addr = dev_info.pci_dev->addr;
  conf.id = dev_info.pci_dev->id;

  struct rte_kni_ops ops;
  memset(&ops, 0, sizeof(ops));
  ops.port_id = port_id;
  ops.change_mtu = kni_change_mtu;
  ops.config_network_if = kni_config_network_interface;

  struct rte_mempool * pktmbuf_pool = (struct rte_mempool *)(mempool_ptr);
  kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);

  //rte_eth_dev_start(port_id);
  return kni;
}
Example no. 15
void init_port(int port_id)
{
	struct rte_eth_dev_info dev_info;
	int ret;
	struct rte_eth_link link;
	
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("Name:%s\n\tDriver name: %s\n\tMax rx queues: %d\n\tMax tx queues: %d\n", dev_info.pci_dev->driver->name,dev_info.driver_name, dev_info.max_rx_queues, dev_info.max_tx_queues);
	printf("\tPCI Adress: %04d:%02d:%02x:%01d\n", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0) 
		rte_panic("Error configuring the port\n");

	ret = rte_eth_rx_queue_setup(port_id, 0, RX_QUEUE_SZ, rte_socket_id(), &rx_conf, pktmbuf_pool);
	if (ret < 0) 
		FATAL_ERROR("Error configuring receiving queue= %d\n", ret);

	// TODO: Need to check whether it is supported in the VMXNET
	/*ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0);       
	if (ret < 0) 
		FATAL_ERROR("Error configuring receiving queue stats= %d [ENOTSUP= %d]\n", ret, ENOTSUP); */

	ret = rte_eth_tx_queue_setup(port_id, 0, TX_QUEUE_SZ, rte_socket_id(), &tx_conf);
	if (ret < 0) 
		FATAL_ERROR("Error configuring transmitting queue. Errno: %d (%d bad arg, %d no mem)\n", -ret, EINVAL ,ENOMEM);

	/* Start device */    
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) 
		FATAL_ERROR("Cannot start port\n");

	/* Enable receipt in promiscuous mode for an Ethernet device */
	//rte_eth_promiscuous_enable(port_id);

	/* Print link status */
	rte_eth_link_get_nowait(port_id, &link);                
	if (link.link_status)   
		printf("\tPort %d Link Up - speed %u Mbps - %s\n", (uint8_t)port_id, (unsigned)link.link_speed,(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?("full-duplex") : ("half-duplex\n"));
	else
		printf("\tPort %d Link Down\n",(uint8_t)port_id);
}
Example no. 16
int
rw_piot_get_device_offload_capability(rw_piot_api_handle_t api_handle,
                                      uint32_t  *rx_offload_capa,                   
                                      uint32_t  *tx_offload_capa)
{
  struct rte_eth_dev_info dev_info;
  rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);
  ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));
  if (NULL == rw_piot_dev) {
    RW_PIOT_LOG(RTE_LOG_ERR, "PIOT Could not find device by handle\n");
    return -1;
  }
  ASSERT(rx_offload_capa);
  ASSERT(tx_offload_capa);
  memset(&dev_info, 0, sizeof(dev_info));
  rte_eth_dev_info_get(rw_piot_dev->rte_port_id, &dev_info);
  *rx_offload_capa = dev_info.rx_offload_capa;
  *tx_offload_capa = dev_info.tx_offload_capa;
  return 0;
}
Example no. 17
static void
pktgen_port_conf_setup(uint32_t pid, rxtx_t * rt, const struct rte_eth_conf * dpc)
{
	port_info_t * info = &pktgen.info[pid];
	struct rte_eth_conf *conf = &info->port_conf;
	struct rte_eth_dev_info *dev = &info->dev_info;

	memcpy(conf, dpc, sizeof(struct rte_eth_conf));
	memcpy(&info->ring_conf, &default_ring_conf, sizeof(ring_conf_t));

	rte_eth_dev_info_get(pid, dev);

	pktgen_dump_dev_info(stdout, dev);

	if (rt->rx > 1) {
		conf->rx_adv_conf.rss_conf.rss_key	= NULL;
		conf->rx_adv_conf.rss_conf.rss_hf	= ETH_RSS_IP;
	} else {
		conf->rx_adv_conf.rss_conf.rss_key	= NULL;
		conf->rx_adv_conf.rss_conf.rss_hf	= 0;
	}

	if ( dev->max_vfs == 0) {
		if( conf->rx_adv_conf.rss_conf.rss_hf != 0)
			conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
		else
			conf->rxmode.mq_mode = ETH_MQ_RX_NONE;
	}

	if (dev->max_vfs != 0) {
		if (conf->rx_adv_conf.rss_conf.rss_hf != 0)
			conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_RSS;
		else
			conf->rxmode.mq_mode = ETH_MQ_RX_NONE;

		conf->txmode.mq_mode = ETH_MQ_TX_NONE;
	}

	rxtx_port_config(pid);
}
Example no. 18
File: kni.c Project: daniel666/dpvs
static void kni_fill_conf(const struct netif_port *dev, const char *ifname,
                          struct rte_kni_conf *conf)
{
    struct rte_eth_dev_info info = {0};

    memset(conf, 0, sizeof(*conf));
    conf->group_id = dev->id;
    conf->mbuf_size = KNI_DEF_MBUF_SIZE;

    if (dev->type == PORT_TYPE_GENERAL) { /* dpdk phy device */
        rte_eth_dev_info_get(dev->id, &info);
        conf->addr = info.pci_dev->addr;
        conf->id = info.pci_dev->id;
    }

    if (ifname && strlen(ifname))
        snprintf(conf->name, sizeof(conf->name), "%s", ifname);
    else
        snprintf(conf->name, sizeof(conf->name), "%s.kni", dev->name);

    return;
}
Example no. 19
int
rw_piot_config_device(rw_piot_device_t *dev,
                      rw_piot_open_request_info_t *req,
                      rw_piot_open_response_info_t *rsp)
{
  struct rte_eth_dev_info dev_info;

  ASSERT(RWPIOT_VALID_DEVICE(dev));

  bzero(rsp, sizeof(*rsp));
  bzero(&dev_info, sizeof(dev_info));

  rte_eth_dev_info_get(dev->rte_port_id, &dev_info);
  if (0 == dev_info.max_rx_queues ||  0 == dev_info.max_tx_queues) {
    printf("Eth device return 0 max rx/tx queues\n");
    return 0;
  } 
  rsp->num_rx_queues = req->num_rx_queues;
  if (rsp->num_rx_queues > dev_info.max_rx_queues) {
    rsp->num_rx_queues = dev_info.max_rx_queues;
  }
  rsp->num_tx_queues = req->num_tx_queues;
  if (rsp->num_tx_queues > dev_info.max_tx_queues) {
    rsp->num_tx_queues = dev_info.max_tx_queues;
  }

  if (!dev_info.rx_offload_capa) {
    req->dev_conf.rxmode.hw_ip_checksum = 0;
  }
  if (dev_info.pci_dev){
    if (dev_info.pci_dev->driver) {
      if (!(dev_info.pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
        req->dev_conf.intr_conf.lsc = 0;
      }
    }
  }
  return(rte_eth_dev_configure(dev->rte_port_id,  rsp->num_rx_queues,
                               rsp->num_tx_queues, &req->dev_conf));
}
Example no. 20
/*
 * Create KNI for specified device
 */
struct rte_kni *
rw_piot_kni_create(rw_piot_api_handle_t api_handle,
                   struct rte_kni_conf *conf,
                   struct rte_mempool * pktmbuf_pool)
{
  struct rte_kni_ops ops;
  struct rte_eth_dev_info dev_info;
  struct rte_kni *kni;
  rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);
  
  if (conf->name[0] == 0) {
    ASSERT(rw_piot_dev);
    rte_snprintf(conf->name, RTE_KNI_NAMESIZE,"vEth%u", rw_piot_dev->rte_port_id);
  }
  conf->mbuf_size = MAX_PACKET_SZ;

  memset(&dev_info, 0, sizeof(dev_info));
  memset(&ops, 0, sizeof(ops));
  if (rw_piot_dev) {
    rte_eth_dev_info_get(rw_piot_dev->rte_port_id, &dev_info);
    conf->group_id = (uint16_t)rw_piot_dev->rte_port_id;
    conf->addr = dev_info.pci_dev->addr;
    conf->id = dev_info.pci_dev->id;
    ops.port_id = rw_piot_dev->rte_port_id;
  }
  ops.change_mtu = rw_piot_kni_change_mtu;
  ops.config_network_if = rw_piot_kni_config_network_interface;
  
  kni = rte_kni_alloc(pktmbuf_pool, conf, &ops); 
  
  if (NULL == kni) {
    printf("rte_kni_alloc returned failure\n");
    return NULL;
  }

  return kni;
}
Example no. 21
/* Update device info */
static void
dpdk_ethdev_info_update(struct vr_dpdk_ethdev *ethdev)
{
    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(ethdev->ethdev_port_id, &dev_info);

    ethdev->ethdev_nb_rx_queues = RTE_MIN(dev_info.max_rx_queues,
        VR_DPDK_MAX_NB_RX_QUEUES);
    ethdev->ethdev_nb_tx_queues = RTE_MIN(RTE_MIN(dev_info.max_tx_queues,
        vr_dpdk.nb_fwd_lcores + 1), VR_DPDK_MAX_NB_TX_QUEUES);
    ethdev->ethdev_nb_rss_queues = RTE_MIN(RTE_MIN(ethdev->ethdev_nb_rx_queues,
        vr_dpdk.nb_fwd_lcores), VR_DPDK_MAX_NB_RSS_QUEUES);
    ethdev->ethdev_reta_size = RTE_MIN(dev_info.reta_size,
        VR_DPDK_MAX_RETA_SIZE);

    RTE_LOG(DEBUG, VROUTER, "dev_info: driver_name=%s if_index=%u"
            " max_rx_queues=%" PRIu16 " max_tx_queues=%" PRIu16
            " max_vfs=%" PRIu16 " max_vmdq_pools=%" PRIu16
            " rx_offload_capa=%" PRIx32 " tx_offload_capa=%" PRIx32 "\n",
            dev_info.driver_name, dev_info.if_index,
            dev_info.max_rx_queues, dev_info.max_tx_queues,
            dev_info.max_vfs, dev_info.max_vmdq_pools,
            dev_info.rx_offload_capa, dev_info.tx_offload_capa);

#if !VR_DPDK_USE_HW_FILTERING
    /* use RSS queues only */
    ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#else
    /* we use just RSS queues if the device does not support RETA */
    if (ethdev->ethdev_reta_size == 0)
        ethdev->ethdev_nb_rx_queues = ethdev->ethdev_nb_rss_queues;
#endif

    return;
}
Example no. 22
/* It is deprecated and just for backward compatibility */
struct rte_kni *
rte_kni_create(uint8_t port_id,
	       unsigned mbuf_size,
	       struct rte_mempool *pktmbuf_pool,
	       struct rte_kni_ops *ops)
{
	struct rte_kni_conf conf;
	struct rte_eth_dev_info info;

	memset(&info, 0, sizeof(info));
	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &info);

	snprintf(conf.name, sizeof(conf.name), "vEth%u", port_id);
	conf.addr = info.pci_dev->addr;
	conf.id = info.pci_dev->id;
	conf.group_id = (uint16_t)port_id;
	conf.mbuf_size = mbuf_size;

	/* Save the port id for request handling */
	ops->port_id = port_id;

	return rte_kni_alloc(pktmbuf_pool, &conf, ops);
}
Example no. 23
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
	rte_eth_dev_info_get (port, &dev_info);

	/*configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;
	num_queues = dev_info.max_rx_queues;

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	if (port >= rte_eth_dev_count()) return -1;

	rx_rings = (uint16_t)num_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q ++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port), &rx_conf_default,
						mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q ++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port), &tx_conf_default);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval  = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
			(unsigned)port,
			vmdq_ports_eth_addr[port].addr_bytes[0],
			vmdq_ports_eth_addr[port].addr_bytes[1],
			vmdq_ports_eth_addr[port].addr_bytes[2],
			vmdq_ports_eth_addr[port].addr_bytes[3],
			vmdq_ports_eth_addr[port].addr_bytes[4],
			vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
Example no. 24
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint16_t port;
	uint8_t queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_mempool *pool;
		uint16_t nic_rx_ring_size;
		uint16_t nic_tx_ring_size;
		struct rte_eth_rxconf rxq_conf;
		struct rte_eth_txconf txq_conf;
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", port);
		rte_eth_dev_info_get(port, &dev_info);
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function based on hardware support,"
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				port,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&local_port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		}
		rte_eth_promiscuous_enable(port);

		nic_rx_ring_size = app.nic_rx_ring_size;
		nic_tx_ring_size = app.nic_tx_ring_size;
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(
			port, &nic_rx_ring_size, &nic_tx_ring_size);
		if (ret < 0) {
			rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
				  port, ret);
		}
		app.nic_rx_ring_size = nic_rx_ring_size;
		app.nic_tx_ring_size = nic_tx_ring_size;

		rxq_conf = dev_info.default_rxconf;
		rxq_conf.offloads = local_port_conf.rxmode.offloads;
		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				port, queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				&rxq_conf,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					  queue, port, ret);
			}
		}

		txq_conf = dev_info.default_txconf;
		txq_conf.offloads = local_port_conf.txmode.offloads;
		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				&txq_conf);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port,
					ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
Example no. 25
int32_t populateNodeInfo (void)
{
    int32_t i = 0, socketId = -1, lcoreIndex = 0, enable = 0;
    uint8_t coreCount, portCount;
    struct rte_eth_dev_info devInfo;

    /* fetch total lcore count under DPDK */
    coreCount = rte_lcore_count();
    for (i = 0; i < coreCount; i++)
    {
       socketId = rte_lcore_to_socket_id(i);
       lcoreIndex = rte_lcore_index(i);
       enable = rte_lcore_is_enabled(i);

       //printf ("\n Logical %d Physical %d Socket %d Enabled %d \n",
       //        i, lcoreIndex, socketId, enable);

       if (likely(enable)) {
           /* classify the lcore info per NUMA node */
           numaNodeInfo[socketId].lcoreAvail = numaNodeInfo[socketId].lcoreAvail | (1 << lcoreIndex);
           numaNodeInfo[socketId].lcoreTotal += 1;
       }
       else {
            rte_panic("ERROR: Lcore %d Socket %d not enabled\n", lcoreIndex, socketId);
            exit(EXIT_FAILURE);
       }
    }

    /* Create mempool per numa node based on interface available */
    portCount = rte_eth_dev_count();
    for (i =0; i < portCount; i++)
    {
        rte_eth_dev_info_get(i, &devInfo);
        printf("\n Inteface %d", i);
        printf("\n - driver: %s", devInfo.driver_name);
        printf("\n - if_index: %d", devInfo.if_index);
        if (devInfo.pci_dev) {
            printf("\n - PCI INFO ");
            printf("\n -- ADDR - domain:bus:devid:function %x:%x:%x:%x",
                  devInfo.pci_dev->addr.domain,
                  devInfo.pci_dev->addr.bus,
                  devInfo.pci_dev->addr.devid,
                  devInfo.pci_dev->addr.function);
            printf("\n == PCI ID - vendor:device:sub-vendor:sub-device %x:%x:%x:%x",
                  devInfo.pci_dev->id.vendor_id,
                  devInfo.pci_dev->id.device_id,
                  devInfo.pci_dev->id.subsystem_vendor_id,
                  devInfo.pci_dev->id.subsystem_device_id);
            printf("\n -- numa node: %d", devInfo.pci_dev->numa_node);
        }

        /* Guard against a NULL pci_dev before dereferencing it */
        socketId = (devInfo.pci_dev && devInfo.pci_dev->numa_node != -1) ?
                   devInfo.pci_dev->numa_node : 0;
        numaNodeInfo[socketId].intfAvail = numaNodeInfo[socketId].intfAvail | (1 << i);
        numaNodeInfo[socketId].intfTotal += 1;
    }

    /* allocate mempool for numa which has NIC interfaces */
    for (i = 0; i < MAX_NUMANODE; i++)
    {
        if (likely(numaNodeInfo[i].intfAvail)) {
            /* ToDo: per interface */
            uint8_t portIndex = 0;
            char mempoolName[25];

            /* create mempool for TX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-tx", i, portIndex);
            numaNodeInfo[i].tx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].tx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for tx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

            /* create mempool for RX */
            sprintf(mempoolName, "mbuf_pool-%d-%d-rx", i, portIndex);
            numaNodeInfo[i].rx[portIndex] = rte_mempool_create(
                        mempoolName, NB_MBUF,
                        MBUF_SIZE, 64,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        i,/*SOCKET_ID_ANY*/
                         0/*MEMPOOL_F_SP_PUT*/);
            if (unlikely(numaNodeInfo[i].rx[portIndex] == NULL)) {
                rte_panic("\n ERROR: failed to get mem-pool for rx on node %d intf %d\n", i, portIndex);
                exit(EXIT_FAILURE);
            }

        }
    }

    return 0;
}
Example no. 26
static int dpdk_main(int port_id, int argc, char* argv[])
{
    struct rte_eth_dev_info dev_info;
    unsigned nb_queues;
    FILE* lfile;
    uint8_t core_id;
    int ret;

    printf("In dpdk_main\n");

    // Open the log file
    lfile = fopen("./vrouter.log", "w");

    // Program the rte log
    rte_openlog_stream(lfile);

    ret = rte_eal_init(argc, argv);
    if (ret < 0) {
		log_crit( "Invalid EAL parameters\n");
        return -1;
    }

    log_info( "Programming cmd rings now!\n");
    rx_event_fd = (int *) malloc(sizeof(int) * rte_lcore_count());
    if (!rx_event_fd) {
        log_crit("Failed to allocate memory for rx event fd arrays\n");
        return -ENOMEM;
    }

    rte_eth_macaddr_get(port_id, &port_eth_addr);
    log_info("Port%d: MAC Address: ", port_id);
    print_ethaddr(&port_eth_addr);


    /* Determine the number of RX/TX pairs supported by NIC */
    rte_eth_dev_info_get(port_id, &dev_info);

    dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
    dev_info.pci_dev->intr_handle.max_intr =
                    dev_info.max_rx_queues + dev_info.max_tx_queues;
    ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle,
            dev_info.max_rx_queues);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n");
    }

    ret = rte_intr_enable(&dev_info.pci_dev->intr_handle);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n");
    }

    ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
                dev_info.max_tx_queues, &port_conf);
    if (ret < 0) {
        rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n");
    }

    /* For each RX/TX pair */
    nb_queues = dev_info.max_tx_queues;
    for (core_id = 0; core_id < nb_queues; core_id++) {
        char s[64];
        if (rte_lcore_is_enabled(core_id) == 0)
            continue;

        /* NUMA socket number */
        unsigned socketid = rte_lcore_to_socket_id(core_id);
        if (socketid >= NB_SOCKETS) {
            log_crit( "Socket %d of lcore %u is out of range %d\n",
				socketid, core_id, NB_SOCKETS);
            return -EBADF;
        }

        /* Create memory pool */
        if (pktmbuf_pool[socketid] == NULL) {
            log_info("Creating mempool on %d of ~%lx bytes\n",
                            socketid, NB_MBUF * MBUF_SIZE);
            printf("Creating mempool on %d of ~%lx bytes\n",
                        socketid, NB_MBUF * MBUF_SIZE);
            snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
            pktmbuf_pool[socketid] = rte_mempool_create(s,
                                                        NB_MBUF,
                                                        MBUF_SIZE,
                                                        MEMPOOL_CACHE_SIZE,
                                                        PKTMBUF_PRIV_SZ,
                                                        rte_pktmbuf_pool_init,
                                                        NULL,
                                                        rte_pktmbuf_init,
                                                        NULL,
                                                        socketid,
                                                        0);
            if (!pktmbuf_pool[socketid]) {
                log_crit( "Cannot init mbuf pool on socket %d\n", socketid);
                return -ENOMEM;
            }
        }

        /* Setup the TX queue */
        ret = rte_eth_tx_queue_setup(port_id,
                                     core_id,
                                     RTE_TX_DESC_DEFAULT,
                                     socketid,
                                     &tx_conf);
        if (ret < 0) {
            log_crit( "Cannot initialize TX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Setup the RX queue */
        ret = rte_eth_rx_queue_setup(port_id,
                                     core_id,
                                     RTE_RX_DESC_DEFAULT,
                                     socketid,
                                     &rx_conf,
                                     pktmbuf_pool[socketid]);
        if (ret < 0) {
            log_crit( "Cannot initialize RX queue (%d)\n", core_id);
            return -ENODEV;
        }

        /* Create the event fds for event notification */
        lcore_cmd_event_fd[core_id] = eventfd(0, 0);
    }

    // Start the eth device
    ret = rte_eth_dev_start(port_id);
    if (ret < 0) {
        log_crit( "rte_eth_dev_start: err=%d, port=%d\n", ret, core_id);
        return -ENODEV;
    }

    // Put the device in promiscuous mode
    rte_eth_promiscuous_enable(port_id);

    // Wait for link up
    //check_all_ports_link_status(1, 1u << port_id);

    log_info( "Starting engines on every core\n");

    rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER);

    return 0;
}
Example no. 27
static inline int
port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	/* Initial zero-queue configure; the port is reconfigured with the
	 * real queue counts below. */
	retval = rte_eth_dev_configure(port, 0, 0, port_conf);

	rte_eth_dev_info_get(port, &dev_info);

	default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
					MAX_NUM_RX_QUEUE);
	default_params.tx_rings = 1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
				default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
				rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			(unsigned int)port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
Example no. 28
struct rte_kni *
rte_kni_create(uint8_t port_id,
		unsigned mbuf_size,
		struct rte_mempool *pktmbuf_pool,
		struct rte_kni_ops *ops)
{
	struct rte_kni_device_info dev_info;
	struct rte_eth_dev_info eth_dev_info;
	struct rte_kni *ctx;
	char itf_name[IFNAMSIZ];
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	const struct rte_memzone *mz;

	if (port_id >= RTE_MAX_ETHPORTS || pktmbuf_pool == NULL || !ops)
		return NULL;

	/* Check FD and open once */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI, "Can not open /dev/%s\n",
							KNI_DEVICE);
			return NULL;
		}
	}

	rte_eth_dev_info_get(port_id, &eth_dev_info);
	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
					eth_dev_info.pci_dev->addr.bus,
					eth_dev_info.pci_dev->addr.devid,
					eth_dev_info.pci_dev->addr.function,
					eth_dev_info.pci_dev->id.vendor_id,
					eth_dev_info.pci_dev->id.device_id);
	dev_info.bus = eth_dev_info.pci_dev->addr.bus;
	dev_info.devid = eth_dev_info.pci_dev->addr.devid;
	dev_info.function = eth_dev_info.pci_dev->addr.function;
	dev_info.vendor_id = eth_dev_info.pci_dev->id.vendor_id;
	dev_info.device_id = eth_dev_info.pci_dev->id.device_id;

	ctx = rte_zmalloc("kni devs", sizeof(struct rte_kni), 0);
	if (ctx == NULL)
		rte_panic("Cannot allocate memory for kni dev\n");
	memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	rte_snprintf(itf_name, IFNAMSIZ, "vEth%u", port_id);
	rte_snprintf(ctx->name, IFNAMSIZ, itf_name);
	rte_snprintf(dev_info.name, IFNAMSIZ, itf_name);

	/* TX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_tx_%d queue\n", port_id);
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_rx_%d queue\n", port_id);
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_alloc_%d queue\n", port_id);
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_free_%d queue\n", port_id);
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_req_%d ring\n", port_id);
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_resp_%d ring\n", port_id);
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	rte_snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", port_id);
	mz = rte_memzone_reserve(obj_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	if (mz == NULL || mz->addr == NULL)
		rte_panic("Cannot create kni_sync_%d mem\n", port_id);
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	/* MBUF mempool */
	mz = rte_memzone_lookup("MP_mbuf_pool");
	if (mz == NULL) {
		RTE_LOG(ERR, KNI, "Can not find MP_mbuf_pool\n");
		goto fail;
	}
	dev_info.mbuf_va = mz->addr;
	dev_info.mbuf_phys = mz->phys_addr;
	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->port_id = port_id;
	ctx->mbuf_size = mbuf_size;

	/* Configure the buffer size which will be checked in kernel module */
	dev_info.mbuf_size = ctx->mbuf_size;

	if (ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to create kni device\n");
		goto fail;
	}

	return ctx;

fail:
	if (ctx != NULL)
		rte_free(ctx);

	return NULL;
}
Example no. 29
/* Init KNI RX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_rx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *rx_queue = &lcore->lcore_rx_queues[vif_idx];
    struct vr_dpdk_queue_params *rx_queue_params
                    = &lcore->lcore_rx_queue_params[vif_idx];

    if (vif->vif_type == VIF_TYPE_HOST) {
        port_id = (((struct vr_dpdk_ethdev *)(vif->vif_bridge->vif_os))->
                ethdev_port_id);
    }

    /* init queue */
    rx_queue->rxq_ops = dpdk_knidev_reader_ops;
    rx_queue->q_queue_h = NULL;
    rx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_reader_params reader_params = {
        .kni = vif->vif_os,
    };
    rx_queue->q_queue_h = rx_queue->rxq_ops.f_create(&reader_params, socket_id);
    if (rx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s RX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    rx_queue_params->qp_release_op = &dpdk_kni_rx_queue_release;

    return rx_queue;
}

/* Release KNI TX queue */
static void
dpdk_kni_tx_queue_release(unsigned lcore_id, struct vr_interface *vif)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif->vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                        = &lcore->lcore_tx_queue_params[vif->vif_idx];

    tx_queue->txq_ops.f_tx = NULL;
    rte_wmb();

    /* flush and free the queue */
    if (tx_queue->txq_ops.f_free(tx_queue->q_queue_h)) {
        RTE_LOG(ERR, VROUTER, "    error freeing lcore %u KNI device TX queue\n",
                    lcore_id);
    }

    /* reset the queue */
    vrouter_put_interface(tx_queue->q_vif);
    memset(tx_queue, 0, sizeof(*tx_queue));
    memset(tx_queue_params, 0, sizeof(*tx_queue_params));
}

/* Init KNI TX queue */
struct vr_dpdk_queue *
vr_dpdk_kni_tx_queue_init(unsigned lcore_id, struct vr_interface *vif,
    unsigned host_lcore_id)
{
    struct vr_dpdk_lcore *lcore = vr_dpdk.lcores[lcore_id];
    const unsigned socket_id = rte_lcore_to_socket_id(lcore_id);
    uint8_t port_id = 0;
    unsigned vif_idx = vif->vif_idx;
    struct vr_dpdk_queue *tx_queue = &lcore->lcore_tx_queues[vif_idx];
    struct vr_dpdk_queue_params *tx_queue_params
                    = &lcore->lcore_tx_queue_params[vif_idx];
    struct vr_dpdk_ethdev *ethdev;

    if (vif->vif_type == VIF_TYPE_HOST) {
        ethdev = vif->vif_bridge->vif_os;
        if (ethdev == NULL) {
            RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue:"
                " bridge vif %u ethdev is not initialized\n",
                vif->vif_name, vif->vif_bridge->vif_idx);
            return NULL;
        }
        port_id = ethdev->ethdev_port_id;
    }

    /* init queue */
    tx_queue->txq_ops = dpdk_knidev_writer_ops;
    tx_queue->q_queue_h = NULL;
    tx_queue->q_vif = vrouter_get_interface(vif->vif_rid, vif_idx);

    /* create the queue */
    struct dpdk_knidev_writer_params writer_params = {
        .kni = vif->vif_os,
        .tx_burst_sz = VR_DPDK_TX_BURST_SZ,
    };
    tx_queue->q_queue_h = tx_queue->txq_ops.f_create(&writer_params, socket_id);
    if (tx_queue->q_queue_h == NULL) {
        RTE_LOG(ERR, VROUTER, "    error creating KNI device %s TX queue"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return NULL;
    }

    /* store queue params */
    tx_queue_params->qp_release_op = &dpdk_kni_tx_queue_release;

    return tx_queue;
}

/* Change KNI MTU size callback */
static int
dpdk_knidev_change_mtu(uint8_t port_id, unsigned new_mtu)
{
    struct vrouter *router = vrouter_get(0);
    struct vr_interface *vif;
    int i, ret;
    uint8_t ethdev_port_id, slave_port_id;
    struct vr_dpdk_ethdev *ethdev = NULL;

    RTE_LOG(INFO, VROUTER, "Changing eth device %" PRIu8 " MTU to %u\n",
                    port_id, new_mtu);
    if (port_id >= rte_eth_dev_count()) {
        RTE_LOG(ERR, VROUTER, "Error changing eth device %"PRIu8" MTU: invalid eth device\n", port_id);
        return -EINVAL;
    }

    /*
     * TODO: DPDK bond PMD does not implement mtu_set op, so we need to
     * set the MTU manually for all the slaves.
     */
    /* Bond vif uses first slave port ID. */
    if (router->vr_eth_if) {
        ethdev = (struct vr_dpdk_ethdev *)router->vr_eth_if->vif_os;
        if (ethdev && ethdev->ethdev_nb_slaves > 0) {
            for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
                if (port_id == ethdev->ethdev_slaves[i])
                    break;
            }
            /* Clear ethdev if no port match. */
            if (i >= ethdev->ethdev_nb_slaves)
                ethdev = NULL;
        }
    }
    if (ethdev && ethdev->ethdev_nb_slaves > 0) {
        for (i = 0; i < ethdev->ethdev_nb_slaves; i++) {
            slave_port_id = ethdev->ethdev_slaves[i];
            RTE_LOG(INFO, VROUTER, "    changing bond member eth device %" PRIu8
                " MTU to %u\n", slave_port_id, new_mtu);

            ret =  rte_eth_dev_set_mtu(slave_port_id, new_mtu);
            if (ret < 0) {
                RTE_LOG(ERR, VROUTER, "    error changing bond member eth device %" PRIu8
                    " MTU: %s (%d)\n", slave_port_id, rte_strerror(-ret), -ret);
                return ret;
            }
        }
    } else {
        ret =  rte_eth_dev_set_mtu(port_id, new_mtu);
        if (ret < 0) {
            RTE_LOG(ERR, VROUTER, "Error changing eth device %" PRIu8
                " MTU: %s (%d)\n", port_id, rte_strerror(-ret), -ret);
        }
        return ret;
    }

    /* On success, inform vrouter about new MTU */
    for (i = 0; i < router->vr_max_interfaces; i++) {
        vif = __vrouter_get_interface(router, i);
        if (vif && (vif->vif_type == VIF_TYPE_PHYSICAL)) {
            ethdev_port_id = (((struct vr_dpdk_ethdev *)(vif->vif_os))->
                        ethdev_port_id);
            if (ethdev_port_id == port_id) {
                /* Ethernet header size */
                new_mtu += sizeof(struct vr_eth);
                if (vr_dpdk.vlan_tag != VLAN_ID_INVALID) {
                    /* 802.1q header size */
                    new_mtu += sizeof(uint32_t);
                }
                vif->vif_mtu = new_mtu;
                if (vif->vif_bridge)
                    vif->vif_bridge->vif_mtu = new_mtu;
            }
        }
    }

    return 0;
}


/* Configure KNI state callback */
static int
dpdk_knidev_config_network_if(uint8_t port_id, uint8_t if_up)
{
    int ret = 0;

    RTE_LOG(INFO, VROUTER, "Configuring eth device %" PRIu8 " %s\n",
                    port_id, if_up ? "UP" : "DOWN");
    if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
        RTE_LOG(ERR, VROUTER, "Invalid eth device %" PRIu8 "\n", port_id);
        return -EINVAL;
    }

    if (if_up)
        ret = rte_eth_dev_start(port_id);
    else
        rte_eth_dev_stop(port_id);

    if (ret < 0) {
        RTE_LOG(ERR, VROUTER, "Configuring eth device %" PRIu8 " %s"
                    " failed (%d)\n", port_id, if_up ? "UP" : "DOWN", ret);
    }

    return ret;
}

/* Init KNI */
int
vr_dpdk_knidev_init(uint8_t port_id, struct vr_interface *vif)
{
    int i;
    struct rte_eth_dev_info dev_info;
    struct rte_kni_conf kni_conf;
    struct rte_kni_ops kni_ops;
    struct rte_kni *kni;
    struct rte_config *rte_conf = rte_eal_get_configuration();

    if (!vr_dpdk.kni_inited) {
        /*
         * If the host does not support KNI (e.g. Red Hat), we'll get
         * a panic here.
         */
        rte_kni_init(VR_DPDK_MAX_KNI_INTERFACES);
        vr_dpdk.kni_inited = true;
    }

    /* get eth device info */
    memset(&dev_info, 0, sizeof(dev_info));
    rte_eth_dev_info_get(port_id, &dev_info);

    /* create KNI configuration */
    memset(&kni_conf, 0, sizeof(kni_conf));
    strncpy(kni_conf.name, (char *)vif->vif_name, sizeof(kni_conf.name) - 1);

    kni_conf.addr = dev_info.pci_dev->addr;
    kni_conf.id = dev_info.pci_dev->id;
    kni_conf.group_id = port_id;
    kni_conf.mbuf_size = VR_DPDK_MAX_PACKET_SZ;
    /*
     * Due to DPDK commit 41a6ebd, now to prevent packet reordering in KNI
     * we have to bind KNI kernel thread to a first online unused CPU.
     */
    for (i = 0; i < RTE_MAX_LCORE; i++) {
        if (lcore_config[i].detected
                && rte_conf->lcore_role[VR_DPDK_FWD_LCORE_ID + i] == ROLE_OFF) {
            kni_conf.force_bind = 1;
            kni_conf.core_id = i;
            RTE_LOG(INFO, VROUTER, "    bind KNI kernel thread to CPU %d\n", i);
            break;
        }
    }

    /* KNI options
     *
     * Changing state of the KNI interface can change state of the physical
     * interface. This is useful for the vhost, but not for the VLAN
     * forwarding interface.
     */
    if (vif->vif_type == VIF_TYPE_VLAN) {
        memset(&kni_ops, 0, sizeof(kni_ops));
    } else {
        kni_ops.port_id = port_id;
        kni_ops.change_mtu = dpdk_knidev_change_mtu;
        kni_ops.config_network_if = dpdk_knidev_config_network_if;
    }

    /* allocate KNI device */
    kni = rte_kni_alloc(vr_dpdk.rss_mempool, &kni_conf, &kni_ops);
    if (kni == NULL) {
        RTE_LOG(ERR, VROUTER, "    error allocation KNI device %s"
            " at eth device %" PRIu8 "\n", vif->vif_name, port_id);
        return -ENOMEM;
    }

    /* store pointer to KNI for further use */
    vif->vif_os = kni;

    /* add interface to the table of KNIs */
    for (i = 0; i < VR_DPDK_MAX_KNI_INTERFACES; i++) {
        if (vr_dpdk.knis[i] == NULL) {
            vr_dpdk.knis[i] = vif->vif_os;
            break;
        }
    }

    return 0;
}
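Note: the change_mtu/config_network_if callbacks registered in these KNI examples only run when the application polls the KNI request FIFO with rte_kni_handle_request(). A minimal polling sketch (burst size and mbuf handling are ours):

#include <rte_kni.h>
#include <rte_mbuf.h>

#define KNI_BURST_SZ 32

/* Sketch: drain packets the kernel wrote to the KNI device and service
 * kernel-side requests (MTU change, link up/down). */
static void kni_poll_once(struct rte_kni *kni)
{
    struct rte_mbuf *mbufs[KNI_BURST_SZ];
    unsigned nb = rte_kni_rx_burst(kni, mbufs, KNI_BURST_SZ);

    /* ... normally forward the nb mbufs to the NIC; here we just free them */
    while (nb > 0)
        rte_pktmbuf_free(mbufs[--nb]);

    /* Invokes the registered change_mtu/config_network_if callbacks. */
    rte_kni_handle_request(kni);
}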
Example no. 30
lagopus_result_t
dpdk_configure_interface(struct interface *ifp) {
  unsigned socket;
  uint32_t lcore;
  uint8_t queue;
  int ret;
  uint32_t n_rx_queues, n_tx_queues;
  uint8_t portid;
  struct rte_mempool *pool;

  portid = ifp->info.eth.port_number;

  n_rx_queues = app_get_nic_rx_queues_per_port(portid);
  n_tx_queues = app.nic_tx_port_mask[portid];

  if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
    return LAGOPUS_RESULT_INVALID_ARGS;
  }

  if (ifp->info.eth_dpdk_phy.mtu < 64 ||
      ifp->info.eth_dpdk_phy.mtu > MAX_PACKET_SZ) {
    return LAGOPUS_RESULT_OUT_OF_RANGE;
  }

  rte_eth_dev_info_get(portid, &ifp->devinfo);

  /* Init port */
  printf("Initializing NIC port %u ...\n", (unsigned) portid);

  ret = rte_eth_dev_configure(portid,
                              (uint8_t) n_rx_queues,
                              (uint8_t) n_tx_queues,
                              &port_conf);
  if (ret < 0) {
    rte_panic("Cannot init NIC port %u (%s)\n",
              (unsigned) portid, strerror(-ret));
  }
  ret = rte_eth_dev_set_mtu(portid, ifp->info.eth_dpdk_phy.mtu);
  if (ret < 0) {
    rte_panic("Cannot set MTU(%d) for port %d (%d)\n",
              ifp->info.eth_dpdk_phy.mtu,
              portid,
              ret);
  }
  rte_eth_promiscuous_enable(portid);

  /* Init RX queues */
  for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
    struct app_lcore_params_io *lp;
    uint8_t i;

    if (app.nic_rx_queue_mask[portid][queue] == NIC_RX_QUEUE_UNCONFIGURED) {
      continue;
    }
    app_get_lcore_for_nic_rx(portid, queue, &lcore);
    lp = &app.lcore_params[lcore].io;
    socket = rte_lcore_to_socket_id(lcore);
    pool = app.lcore_params[lcore].pool;

    printf("Initializing NIC port %u RX queue %u ...\n",
           (unsigned) portid,
           (unsigned) queue);
    ret = rte_eth_rx_queue_setup(portid,
                                 queue,
                                 (uint16_t) app.nic_rx_ring_size,
                                 socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_rxconf,
#else
                                 &rx_conf,
#endif /* RTE_VERSION_NUM */
                                 pool);
    if (ret < 0) {
      rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                (unsigned) queue,
                (unsigned) portid,
                ret);
    }
    for (i = 0; i < lp->rx.n_nic_queues; i++) {
      if (lp->rx.nic_queues[i].port != portid ||
          lp->rx.nic_queues[i].queue != queue) {
        continue;
      }
      lp->rx.nic_queues[i].enabled = true;
      break;
    }
  }

  /* Init TX queues */
  if (app.nic_tx_port_mask[portid] == 1) {
    app_get_lcore_for_nic_tx(portid, &lcore);
    socket = rte_lcore_to_socket_id(lcore);
    printf("Initializing NIC port %u TX queue 0 ...\n",
           (unsigned) portid);
    ret = rte_eth_tx_queue_setup(portid,
                                 0,
                                 (uint16_t) app.nic_tx_ring_size,
                                 socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_txconf
#else
                                 &tx_conf
#endif /* RTE_VERSION_NUM */
                                );
    if (ret < 0) {
      rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
                portid,
                ret);
    }
  }

  ifp->stats = port_stats;
  dpdk_interface_set_index(ifp);

  return LAGOPUS_RESULT_OK;
}