/* Check the link status of all ports in up to 9s, and print them finally */ static void check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) { #define CHECK_INTERVAL 100 /* 100ms */ #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ uint16_t portid; uint8_t count, all_ports_up, print_flag = 0; struct rte_eth_link link; uint32_t n_rx_queues, n_tx_queues; printf("\nChecking link status"); fflush(stdout); for (count = 0; count <= MAX_CHECK_TIME; count++) { all_ports_up = 1; for (portid = 0; portid < port_num; portid++) { if ((port_mask & (1 << portid)) == 0) continue; n_rx_queues = app_get_nic_rx_queues_per_port(portid); n_tx_queues = app.nic_tx_port_mask[portid]; if ((n_rx_queues == 0) && (n_tx_queues == 0)) continue; memset(&link, 0, sizeof(link)); rte_eth_link_get_nowait(portid, &link); /* print link status if flag set */ if (print_flag == 1) { if (link.link_status) printf( "Port%d Link Up - speed %uMbps - %s\n", portid, link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex\n")); else printf("Port %d Link Down\n", portid); continue; } /* clear all_ports_up flag if any link down */ if (link.link_status == ETH_LINK_DOWN) { all_ports_up = 0; break; } } /* after finally printing all link status, get out */ if (print_flag == 1) break; if (all_ports_up == 0) { printf("."); fflush(stdout); rte_delay_ms(CHECK_INTERVAL); } /* set the print_flag if all ports up or timeout */ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { print_flag = 1; printf("done\n"); } } }
/*
 * Verify that every NIC port with at least one configured RX queue is also
 * enabled for TX in app.nic_tx_port_mask.
 *
 * Returns 0 when the invariant holds for all ports, -1 at the first port
 * that receives but cannot transmit.
 */
static int
app_check_every_rx_port_is_tx_enabled(void)
{
  uint8_t p;

  for (p = 0; p < APP_MAX_NIC_PORTS; p ++) {
    int has_rx_queues = (app_get_nic_rx_queues_per_port(p) > 0);
    int tx_disabled = (app.nic_tx_port_mask[p] == 0);

    if (has_rx_queues && tx_disabled) {
      return -1;
    }
  }

  return 0;
}
/*
 * Initialize every NIC port used by the application, then start it.
 *
 * For each port that has RX queues or TX enabled in the app config:
 *   - apply hardware-supported TX offloads and RSS hash functions,
 *   - configure the device, enable promiscuous mode,
 *   - adjust ring sizes to device limits, set up RX queues (one per set bit
 *     in app.nic_rx_queue_mask) and at most one TX queue (queue 0),
 *   - start the port.
 * Finally waits for link-up on all ports via check_all_ports_link_status().
 * Any failure is fatal (rte_panic).
 */
static void
app_init_nics(void)
{
  unsigned socket;
  uint32_t lcore;
  uint16_t port;
  uint8_t queue;
  int ret;
  uint32_t n_rx_queues, n_tx_queues;

  /* Init NIC ports and queues, then start the ports */
  for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
    struct rte_mempool *pool;
    uint16_t nic_rx_ring_size;
    uint16_t nic_tx_ring_size;
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;
    struct rte_eth_dev_info dev_info;
    /* Per-port copy so offload/RSS tweaks don't leak into other ports. */
    struct rte_eth_conf local_port_conf = port_conf;
    n_rx_queues = app_get_nic_rx_queues_per_port(port);
    /*
     * NOTE(review): the TX "queue count" comes from a 0/1 port mask, so
     * at most one TX queue is ever configured (matches queue 0 below).
     */
    n_tx_queues = app.nic_tx_port_mask[port];
    if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
      continue;
    }
    /* Init port */
    printf("Initializing NIC port %u ...\n", port);
    rte_eth_dev_info_get(port, &dev_info);
    if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
      local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
    /* Keep only RSS hash functions the hardware actually supports. */
    local_port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
    if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
        port_conf.rx_adv_conf.rss_conf.rss_hf) {
      printf("Port %u modified RSS hash function based on hardware support,"
             "requested:%#"PRIx64" configured:%#"PRIx64"\n",
             port,
             port_conf.rx_adv_conf.rss_conf.rss_hf,
             local_port_conf.rx_adv_conf.rss_conf.rss_hf);
    }
    ret = rte_eth_dev_configure(port, (uint8_t) n_rx_queues,
                                (uint8_t) n_tx_queues, &local_port_conf);
    if (ret < 0) {
      rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
    }
    rte_eth_promiscuous_enable(port);
    /*
     * Clamp requested descriptor counts to device limits.
     * NOTE(review): the adjusted values are written back to the global app
     * config, so the last port processed wins — confirm all ports share
     * the same limits.
     */
    nic_rx_ring_size = app.nic_rx_ring_size;
    nic_tx_ring_size = app.nic_tx_ring_size;
    ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nic_rx_ring_size,
                                           &nic_tx_ring_size);
    if (ret < 0) {
      rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
                port, ret);
    }
    app.nic_rx_ring_size = nic_rx_ring_size;
    app.nic_tx_ring_size = nic_tx_ring_size;
    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = local_port_conf.rxmode.offloads;
    /* Init RX queues: pool and socket come from the lcore that polls them. */
    for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
      if (app.nic_rx_queue_mask[port][queue] == 0) {
        continue;
      }
      app_get_lcore_for_nic_rx(port, queue, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      pool = app.lcore_params[lcore].pool;
      printf("Initializing NIC port %u RX queue %u ...\n", port, queue);
      ret = rte_eth_rx_queue_setup(port, queue,
                                   (uint16_t) app.nic_rx_ring_size,
                                   socket, &rxq_conf, pool);
      if (ret < 0) {
        rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                  queue, port, ret);
      }
    }
    txq_conf = dev_info.default_txconf;
    txq_conf.offloads = local_port_conf.txmode.offloads;
    /* Init TX queues: single TX queue (0) per TX-enabled port. */
    if (app.nic_tx_port_mask[port] == 1) {
      app_get_lcore_for_nic_tx(port, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      printf("Initializing NIC port %u TX queue 0 ...\n", port);
      ret = rte_eth_tx_queue_setup(port, 0,
                                   (uint16_t) app.nic_tx_ring_size,
                                   socket, &txq_conf);
      if (ret < 0) {
        rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, ret);
      }
    }
    /* Start port */
    ret = rte_eth_dev_start(port);
    if (ret < 0) {
      rte_panic("Cannot start port %d (%d)\n", port, ret);
    }
  }
  /* Block (bounded) until links are up; ~0x0 selects every port. */
  check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
/*
 * Configure and set up one DPDK physical interface described by ifp.
 *
 * Resolves the port number from ifp->info.eth, validates the configured MTU
 * (64 .. MAX_PACKET_SZ), configures the device, applies the MTU, enables
 * promiscuous mode, sets up the configured RX queues and (at most) TX queue
 * 0, then records the stats callback and interface index.
 *
 * Returns LAGOPUS_RESULT_OK on success,
 *         LAGOPUS_RESULT_INVALID_ARGS if the port has no RX/TX configured,
 *         LAGOPUS_RESULT_OUT_OF_RANGE for a bad MTU.
 * NOTE(review): queue-setup failures rte_panic() rather than returning an
 * error code — confirm this is intended for a function with a result type.
 */
lagopus_result_t
dpdk_configure_interface(struct interface *ifp) {
  unsigned socket;
  uint32_t lcore;
  uint8_t queue;
  int ret;
  uint32_t n_rx_queues, n_tx_queues;
  uint8_t portid;
  struct rte_mempool *pool;

  portid = ifp->info.eth.port_number;
  n_rx_queues = app_get_nic_rx_queues_per_port(portid);
  /* 0/1 port mask doubles as the TX queue count (single TX queue model). */
  n_tx_queues = app.nic_tx_port_mask[portid];
  if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
    return LAGOPUS_RESULT_INVALID_ARGS;
  }
  if (ifp->info.eth_dpdk_phy.mtu < 64 ||
      ifp->info.eth_dpdk_phy.mtu > MAX_PACKET_SZ) {
    return LAGOPUS_RESULT_OUT_OF_RANGE;
  }
  /* Cache device info in the interface; also supplies default queue confs. */
  rte_eth_dev_info_get(portid, &ifp->devinfo);
  /* Init port */
  printf("Initializing NIC port %u ...\n", (unsigned) portid);
  ret = rte_eth_dev_configure(portid, (uint8_t) n_rx_queues,
                              (uint8_t) n_tx_queues, &port_conf);
  if (ret < 0) {
    rte_panic("Cannot init NIC port %u (%s)\n",
              (unsigned) portid, strerror(-ret));
  }
  ret = rte_eth_dev_set_mtu(portid, ifp->info.eth_dpdk_phy.mtu);
  if (ret < 0) {
    rte_panic("Cannot set MTU(%d) for port %d (%d)\n",
              ifp->info.eth_dpdk_phy.mtu, portid, ret);
  }
  rte_eth_promiscuous_enable(portid);
  /* Init RX queues */
  for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
    struct app_lcore_params_io *lp;
    uint8_t i;
    if (app.nic_rx_queue_mask[portid][queue] == NIC_RX_QUEUE_UNCONFIGURED) {
      continue;
    }
    /* Queue memory comes from the pool of the lcore that will poll it. */
    app_get_lcore_for_nic_rx(portid, queue, &lcore);
    lp = &app.lcore_params[lcore].io;
    socket = rte_lcore_to_socket_id(lcore);
    pool = app.lcore_params[lcore].pool;
    printf("Initializing NIC port %u RX queue %u ...\n",
           (unsigned) portid, (unsigned) queue);
    ret = rte_eth_rx_queue_setup(portid, queue,
                                 (uint16_t) app.nic_rx_ring_size, socket,
                                 /* DPDK >= 1.8 provides per-device defaults;
                                  * older releases use the static rx_conf. */
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_rxconf,
#else
                                 &rx_conf,
#endif /* RTE_VERSION_NUM */
                                 pool);
    if (ret < 0) {
      rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                (unsigned) queue, (unsigned) portid, ret);
    }
    /* Mark the matching (port, queue) entry of the polling lcore enabled. */
    for (i = 0; i < lp->rx.n_nic_queues; i++) {
      if (lp->rx.nic_queues[i].port != portid ||
          lp->rx.nic_queues[i].queue != queue) {
        continue;
      }
      lp->rx.nic_queues[i].enabled = true;
      break;
    }
  }
  /* Init TX queues: single TX queue (0) when the port is TX-enabled. */
  if (app.nic_tx_port_mask[portid] == 1) {
    app_get_lcore_for_nic_tx(portid, &lcore);
    socket = rte_lcore_to_socket_id(lcore);
    printf("Initializing NIC port %u TX queue 0 ...\n", (unsigned) portid);
    ret = rte_eth_tx_queue_setup(portid, 0,
                                 (uint16_t) app.nic_tx_ring_size, socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
                                 &ifp->devinfo.default_txconf
#else
                                 &tx_conf
#endif /* RTE_VERSION_NUM */
                                );
    if (ret < 0) {
      rte_panic("Cannot init TX queue 0 for port %d (%d)\n", portid, ret);
    }
  }
  ifp->stats = port_stats;
  dpdk_interface_set_index(ifp);
  return LAGOPUS_RESULT_OK;
}
/*
 * Dump the complete application configuration to stdout: NIC RX/TX port and
 * queue assignments, per-lcore I/O and worker ring wiring, ring sizes, and
 * burst sizes. Purely informational — reads app state, prints, changes
 * nothing.
 */
void
app_print_params(void) {
  unsigned port, queue, lcore, i, j;

  /* Print NIC RX configuration: each used port with its RX queue list. */
  printf("NIC RX ports: ");
  for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
    uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);
    if (n_rx_queues == 0) {
      continue;
    }
    printf("%u (", port);
    for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
      if (app.nic_rx_queue_mask[port][queue] == 1) {
        printf("%u ", queue);
      }
    }
    printf(") ");
  }
  printf(";\n");
  /* Print I/O lcore RX params: (port, queue) pairs and output ring ptrs. */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
    if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
        (lp->rx.n_nic_queues == 0)) {
      continue;
    }
    printf("I/O lcore %u (socket %u): ",
           lcore, rte_lcore_to_socket_id(lcore));
    printf("RX ports ");
    for (i = 0; i < lp->rx.n_nic_queues; i ++) {
      printf("(%u, %u) ",
             (unsigned) lp->rx.nic_queues[i].port,
             (unsigned) lp->rx.nic_queues[i].queue);
    }
    printf("; ");
    printf("Output rings ");
    for (i = 0; i < lp->rx.n_rings; i ++) {
      printf("%p ", lp->rx.rings[i]);
    }
    printf(";\n");
  }
  /* Print worker lcore RX params: input ring pointers per worker. */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
    if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
      continue;
    }
    printf("Worker lcore %u (socket %u) ID %u: ",
           lcore, rte_lcore_to_socket_id(lcore), (unsigned)lp->worker_id);
    printf("Input rings ");
    for (i = 0; i < lp->n_rings_in; i ++) {
      printf("%p ", lp->rings_in[i]);
    }
    printf(";\n");
  }
  printf("\n");
  /* Print NIC TX configuration: ports with TX enabled (mask == 1). */
  printf("NIC TX ports: ");
  for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
    if (app.nic_tx_port_mask[port] == 1) {
      printf("%u ", port);
    }
  }
  printf(";\n");
  /* Print I/O TX lcore params: per TX port, one input ring per worker. */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
    uint32_t n_workers = app_get_lcores_worker();
    if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
        (lp->tx.n_nic_ports == 0)) {
      continue;
    }
    printf("I/O lcore %u (socket %u): ",
           lcore, rte_lcore_to_socket_id(lcore));
    printf("Input rings per TX port ");
    for (i = 0; i < lp->tx.n_nic_ports; i ++) {
      port = lp->tx.nic_ports[i];
      printf("%u (", port);
      for (j = 0; j < n_workers; j ++) {
        printf("%p ", lp->tx.rings[port][j]);
      }
      printf(") ");
    }
    printf(";\n");
  }
  /* Print worker lcore TX params: non-NULL output rings per TX port. */
  for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
    struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
    if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
      continue;
    }
    printf("Worker lcore %u (socket %u) ID %u: \n",
           lcore, rte_lcore_to_socket_id(lcore), (unsigned)lp->worker_id);
    printf("Output rings per TX port ");
    for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
      if (lp->rings_out[port] != NULL) {
        printf("%u (%p) ", port, lp->rings_out[port]);
      }
    }
    printf(";\n");
  }
  /* Rings */
  printf("Ring sizes: NIC RX = %u; Worker in = %u; Worker out = %u; NIC TX = %u;\n",
         (unsigned) app.nic_rx_ring_size,
         (unsigned) app.ring_rx_size,
         (unsigned) app.ring_tx_size,
         (unsigned) app.nic_tx_ring_size);
  /* Bursts */
  printf("Burst sizes: I/O RX (rd = %u, wr = %u); Worker (rd = %u, wr = %u); I/O TX (rd = %u, wr = %u)\n",
         (unsigned) app.burst_size_io_rx_read,
         (unsigned) app.burst_size_io_rx_write,
         (unsigned) app.burst_size_worker_read,
         (unsigned) app.burst_size_worker_write,
         (unsigned) app.burst_size_io_tx_read,
         (unsigned) app.burst_size_io_tx_write);
}
/*
 * Initialize the PMD drivers, probe PCI, then configure and start every NIC
 * port used by the application (legacy pre-1.8 DPDK API variant).
 *
 * For each used port: configure the device, enable promiscuous mode, set up
 * the RX queues from app.nic_rx_queue_mask and (at most) TX queue 0, start
 * the port, then print its MAC address and record link status/speed into
 * the global port_stat[] table. Any failure is fatal (rte_panic).
 */
static void
app_init_nics(void) {
  uint32_t socket, lcore;
  uint8_t port, queue;
  struct ether_addr mac_addr;
  int ret;

  /* Init driver */
  printf("Initializing the PMD driver ...\n");
#ifdef RTE_LIBRTE_IGB_PMD
  if (rte_igb_pmd_init() < 0) {
    rte_panic("Cannot init IGB PMD\n");
  }
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
  if (rte_ixgbe_pmd_init() < 0) {
    rte_panic("Cannot init IXGBE PMD\n");
  }
#endif
  if (rte_eal_pci_probe() < 0) {
    rte_panic("Cannot probe PCI\n");
  }
  /* Zero the per-port statistics table before filling it below. */
  memset(port_stat,0,sizeof(struct port_stat)*MAX_PORT_NUM);
  /* Init NIC ports and queues, then start the ports */
  for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
    struct rte_eth_link link;
    struct rte_mempool *pool;
    uint32_t n_rx_queues, n_tx_queues;
    n_rx_queues = app_get_nic_rx_queues_per_port(port);
    /* 0/1 port mask doubles as the TX queue count (single TX queue). */
    n_tx_queues = app.nic_tx_port_mask[port];
    if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
      continue;
    }
    /* Init port */
    printf("Initializing NIC port %u ...\n", (uint32_t) port);
    ret = rte_eth_dev_configure(port, (uint8_t) n_rx_queues,
                                (uint8_t) n_tx_queues, &port_conf);
    if (ret < 0) {
      rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
    }
    rte_eth_promiscuous_enable(port);
    /* Init RX queues: mempool/socket come from the lcore that polls them. */
    for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
      if (app.nic_rx_queue_mask[port][queue] == 0) {
        continue;
      }
      app_get_lcore_for_nic_rx(port, queue, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      pool = app.lcore_params[lcore].pool;
      printf("Initializing NIC port %u RX queue %u ...\n",
             (uint32_t) port, (uint32_t) queue);
      ret = rte_eth_rx_queue_setup(port, queue,
                                   (uint16_t) app.nic_rx_ring_size,
                                   socket, &rx_conf, pool);
      if (ret < 0) {
        rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                  (uint32_t) queue, (uint32_t) port, ret);
      }
    }
    /* Init TX queues: single TX queue (0) per TX-enabled port. */
    if (app.nic_tx_port_mask[port] == 1) {
      app_get_lcore_for_nic_tx(port, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      printf("Initializing NIC port %u TX queue 0 ...\n", (uint32_t) port);
      ret = rte_eth_tx_queue_setup(port, 0,
                                   (uint16_t) app.nic_tx_ring_size,
                                   socket, &tx_conf);
      if (ret < 0) {
        rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, ret);
      }
    }
    /* Start port */
    ret = rte_eth_dev_start(port);
    if (ret < 0) {
      rte_panic("Cannot start port %d (%d)\n", port, ret);
    }
    /* Get link status (blocking variant, waits for negotiation). */
    rte_eth_link_get(port, &link);
    rte_eth_macaddr_get(port,&mac_addr);
    /* Print MAC as aa:bb:cc:dd:ee:ff — after the loop i == 5, so the
     * final printf emits the 6th byte. */
    int i=0;
    for(i=0;i<5;i++)
      printf("%02x:",mac_addr.addr_bytes[i]);
    printf("%02x\n",mac_addr.addr_bytes[i]);
    /* Cache the 6-byte MAC in port_stat and echo it back as a check. */
    memcpy(port_stat[port].mac_addr,mac_addr.addr_bytes,6);
    for(i=0;i<5;i++)
      printf("%02x:",port_stat[port].mac_addr[i]);
    printf("%02x\n",port_stat[port].mac_addr[i]);
    /* Record link state and speed in the stats table. */
    if (link.link_status) {
      printf("Port %u is UP (%u Mbps)\n",
             (uint32_t) port, (unsigned) link.link_speed);
      port_stat[port].port_status=1;
      port_stat[port].port_speed=link.link_speed;
    } else {
      printf("Port %u is DOWN\n", (uint32_t) port);
      port_stat[port].port_status=0;
    }
  }
}
/*
 * Initialize all PMDs, probe PCI, then configure and start every NIC port
 * used by the application (legacy DPDK API variant using rte_pmd_init_all).
 *
 * For each port with RX queues or TX enabled: configure the device, enable
 * promiscuous mode, set up RX queues from app.nic_rx_queue_mask and (at
 * most) TX queue 0, and start the port. Finally waits for link-up on all
 * ports. Any failure is fatal (rte_panic).
 */
static void
app_init_nics(void) {
  unsigned socket;
  uint32_t lcore;
  uint8_t port, queue;
  int ret;
  uint32_t n_rx_queues, n_tx_queues;

  /* Init driver */
  printf("Initializing the PMD driver ...\n");
  if (rte_pmd_init_all() < 0) {
    rte_panic("Cannot init PMD\n");
  }
  if (rte_eal_pci_probe() < 0) {
    rte_panic("Cannot probe PCI\n");
  }
  /* Init NIC ports and queues, then start the ports */
  for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
    struct rte_mempool *pool;
    n_rx_queues = app_get_nic_rx_queues_per_port(port);
    /* 0/1 port mask doubles as the TX queue count (single TX queue). */
    n_tx_queues = app.nic_tx_port_mask[port];
    if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
      continue;
    }
    /* Init port */
    printf("Initializing NIC port %u ...\n", (unsigned) port);
    ret = rte_eth_dev_configure(port, (uint8_t) n_rx_queues,
                                (uint8_t) n_tx_queues, &port_conf);
    if (ret < 0) {
      rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
    }
    rte_eth_promiscuous_enable(port);
    /* Init RX queues: mempool/socket come from the lcore that polls them. */
    for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
      if (app.nic_rx_queue_mask[port][queue] == 0) {
        continue;
      }
      app_get_lcore_for_nic_rx(port, queue, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      pool = app.lcore_params[lcore].pool;
      printf("Initializing NIC port %u RX queue %u ...\n",
             (unsigned) port, (unsigned) queue);
      ret = rte_eth_rx_queue_setup(port, queue,
                                   (uint16_t) app.nic_rx_ring_size,
                                   socket, &rx_conf, pool);
      if (ret < 0) {
        rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                  (unsigned) queue, (unsigned) port, ret);
      }
    }
    /* Init TX queues: single TX queue (0) per TX-enabled port. */
    if (app.nic_tx_port_mask[port] == 1) {
      app_get_lcore_for_nic_tx(port, &lcore);
      socket = rte_lcore_to_socket_id(lcore);
      printf("Initializing NIC port %u TX queue 0 ...\n", (unsigned) port);
      ret = rte_eth_tx_queue_setup(port, 0,
                                   (uint16_t) app.nic_tx_ring_size,
                                   socket, &tx_conf);
      if (ret < 0) {
        rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, ret);
      }
    }
    /* Start port */
    ret = rte_eth_dev_start(port);
    if (ret < 0) {
      rte_panic("Cannot start port %d (%d)\n", port, ret);
    }
  }
  /* Block (bounded) until links are up; ~0x0 selects every port. */
  check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}