static void bond_port_init(struct rte_mempool *mbuf_pool) { int retval; uint8_t i; retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB, 0 /*SOCKET_ID_ANY*/); if (retval < 0) rte_exit(EXIT_FAILURE, "Failed to create bond port\n"); BOND_PORT = (uint8_t)retval; retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf); if (retval != 0) rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n", BOND_PORT, retval); /* RX setup */ retval = rte_eth_rx_queue_setup(BOND_PORT, 0, RTE_RX_DESC_DEFAULT, rte_eth_dev_socket_id(BOND_PORT), NULL, mbuf_pool); if (retval < 0) rte_exit(retval, "port %u: RX queue 0 setup failed (res=%d)", BOND_PORT, retval); /* TX setup */ retval = rte_eth_tx_queue_setup(BOND_PORT, 0, RTE_TX_DESC_DEFAULT, rte_eth_dev_socket_id(BOND_PORT), NULL); if (retval < 0) rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)", BOND_PORT, retval); for (i = 0; i < slaves_count; i++) { if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1) rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n", slaves[i], BOND_PORT); } retval = rte_eth_dev_start(BOND_PORT); if (retval < 0) rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval); rte_eth_promiscuous_enable(BOND_PORT); struct ether_addr addr; rte_eth_macaddr_get(BOND_PORT, &addr); printf("Port %u MAC: ", (unsigned)BOND_PORT); PRINT_MAC(addr); printf("\n"); }
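The bonding snippet above relies on a PRINT_MAC helper plus BOND_PORT, slaves[] and slaves_count globals defined elsewhere. A minimal sketch of the macro, assuming struct ether_addr and <inttypes.h> (hypothetical, not taken from the source):

/* Print a MAC address as colon-separated hex; addr is a struct ether_addr. */
#define PRINT_MAC(addr) \
	printf("%02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
		(addr).addr_bytes[0], (addr).addr_bytes[1], (addr).addr_bytes[2], \
		(addr).addr_bytes[3], (addr).addr_bytes[4], (addr).addr_bytes[5])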
static void app_init_ports(void) { uint32_t i; /* Init NIC ports, then start the ports */ for (i = 0; i < app.n_ports; i++) { uint16_t port; int ret; port = app.ports[i]; RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port); /* Init port */ ret = rte_eth_dev_configure( port, 1, 1, &port_conf); if (ret < 0) rte_panic("Cannot init NIC port %u (%d)\n", port, ret); rte_eth_promiscuous_enable(port); /* Init RX queues */ ret = rte_eth_rx_queue_setup( port, 0, app.port_rx_ring_size, rte_eth_dev_socket_id(port), &rx_conf, app.pool); if (ret < 0) rte_panic("Cannot init RX for port %u (%d)\n", (uint32_t) port, ret); /* Init TX queues */ ret = rte_eth_tx_queue_setup( port, 0, app.port_tx_ring_size, rte_eth_dev_socket_id(port), &tx_conf); if (ret < 0) rte_panic("Cannot init TX for port %u (%d)\n", (uint32_t) port, ret); /* Start port */ ret = rte_eth_dev_start(port); if (ret < 0) rte_panic("Cannot start port %u (%d)\n", port, ret); } app_ports_check_link(); }
static inline int port_init(uint16_t port, struct rte_mempool *mbuf_pool) { struct rte_eth_conf port_conf = port_conf_default; const uint16_t rx_rings = 1, tx_rings = 1; int retval; uint16_t q; if (port >= rte_eth_dev_count()) return -1; /* Configure the Ethernet device. */ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) return retval; /* Allocate and set up 1 RX queue per Ethernet port. */ for (q = 0; q < rx_rings; q++) { retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE, rte_eth_dev_socket_id(port), NULL, mbuf_pool); if (retval < 0) return retval; } /* Allocate and set up 1 TX queue per Ethernet port. */ for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, rte_eth_dev_socket_id(port), NULL); if (retval < 0) return retval; } /* Start the Ethernet port. */ retval = rte_eth_dev_start(port); if (retval < 0) return retval; /* Display the port MAC address. */ struct ether_addr addr; rte_eth_macaddr_get(port, &addr); printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n", (unsigned int)port, addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]); /* Enable RX in promiscuous mode for the Ethernet device. */ rte_eth_promiscuous_enable(port); return 0; }
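port_init() above depends on RX_RING_SIZE, TX_RING_SIZE and port_conf_default, which it does not define. Plausible definitions in the style of the DPDK skeleton example (sizes illustrative, not normative):

#define RX_RING_SIZE 128	/* RX descriptors per queue (assumed) */
#define TX_RING_SIZE 512	/* TX descriptors per queue (assumed) */

static const struct rte_eth_conf port_conf_default = {
	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN },
};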
/** * Initialise an individual port: * - configure number of rx and tx rings * - set up each rx ring, to pull from the main mbuf pool * - set up each tx ring * - start the port and report its status to stdout */ int init_port(uint8_t port_num) { /* for port configuration all features are off by default */ const struct rte_eth_conf port_conf = { .rxmode = { .hw_vlan_filter = 0, .hw_vlan_strip = 0, .hw_vlan_extend = 0, .mq_mode = ETH_MQ_RX_RSS } }; const uint16_t rx_rings = 1, tx_rings = 1; const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT; const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT; uint16_t q; int retval; printf("Port %u init ... ", (unsigned)port_num); fflush(stdout); /* Standard DPDK port initialisation - config port, then set up rx and tx rings */ if ((retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings, &port_conf)) != 0) return retval; for (q = 0; q < rx_rings; q++) { retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size, rte_eth_dev_socket_id(port_num), NULL, pktmbuf_pool); if (retval < 0) return retval; } for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size, rte_eth_dev_socket_id(port_num), NULL); if (retval < 0) return retval; } rte_eth_promiscuous_enable(port_num); retval = rte_eth_dev_start(port_num); if (retval < 0) return retval; printf("Port %d init done\n", port_num); return 0; }
int VIFHYPER_CREATE(const char *devstr, struct virtif_sc *vif_sc, uint8_t *enaddr, struct virtif_user **viup) { struct rte_eth_conf portconf; struct rte_eth_link link; struct ether_addr ea; struct virtif_user *viu; int rv = EINVAL; /* XXX: not very accurate ;) */ viu = malloc(sizeof(*viu)); memset(viu, 0, sizeof(*viu)); viu->viu_devstr = strdup(devstr); viu->viu_virtifsc = vif_sc; /* this is here only for simplicity */ if ((rv = globalinit(viu)) != 0) goto out; memset(&portconf, 0, sizeof(portconf)); if ((rv = rte_eth_dev_configure(IF_PORTID, NQUEUE, NQUEUE, &portconf)) < 0) OUT("configure device"); if ((rv = rte_eth_rx_queue_setup(IF_PORTID, 0, NDESCRX, 0, &rxconf, mbpool_rx)) <0) OUT("rx queue setup"); if ((rv = rte_eth_tx_queue_setup(IF_PORTID, 0, NDESCTX, 0, &txconf)) < 0) OUT("tx queue setup"); if ((rv = rte_eth_dev_start(IF_PORTID)) < 0) OUT("device start"); rte_eth_link_get(IF_PORTID, &link); if (!link.link_status) { ifwarn(viu, "link down"); } rte_eth_promiscuous_enable(IF_PORTID); rte_eth_macaddr_get(IF_PORTID, &ea); memcpy(enaddr, ea.addr_bytes, ETHER_ADDR_LEN); rv = pthread_create(&viu->viu_rcvpt, NULL, receiver, viu); out: /* XXX: well this isn't much of an unrolling ... */ if (rv != 0) free(viu); else *viup = viu; return rumpuser_component_errtrans(-rv); }
/* * Initialises a given port using global settings and with the rx buffers * coming from the mbuf_pool passed as parameter */ static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool) { struct rte_eth_conf port_conf; const uint16_t rxRings = ETH_VMDQ_DCB_NUM_QUEUES, txRings = (uint16_t)rte_lcore_count(); const uint16_t rxRingSize = 128, txRingSize = 512; int retval; uint16_t q; retval = get_eth_conf(&port_conf, num_pools); if (retval < 0) return retval; if (port >= rte_eth_dev_count()) return -1; retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf); if (retval != 0) return retval; for (q = 0; q < rxRings; q ++) { retval = rte_eth_rx_queue_setup(port, q, rxRingSize, rte_eth_dev_socket_id(port), NULL, mbuf_pool); if (retval < 0) return retval; } for (q = 0; q < txRings; q ++) { retval = rte_eth_tx_queue_setup(port, q, txRingSize, rte_eth_dev_socket_id(port), NULL); if (retval < 0) return retval; } retval = rte_eth_dev_start(port); if (retval < 0) return retval; struct ether_addr addr; rte_eth_macaddr_get(port, &addr); printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", (unsigned)port, addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]); return 0; }
static void slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool) { int retval; uint16_t nb_rxd = RTE_RX_DESC_DEFAULT; uint16_t nb_txd = RTE_TX_DESC_DEFAULT; if (portid >= rte_eth_dev_count()) rte_exit(EXIT_FAILURE, "Invalid port\n"); retval = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (retval != 0) rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n", portid, retval); retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd); if (retval != 0) rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc " "failed (res=%d)\n", portid, retval); /* RX setup */ retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), NULL, mbuf_pool); if (retval < 0) rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)", portid, retval); /* TX setup */ retval = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), NULL); if (retval < 0) rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)", portid, retval); retval = rte_eth_dev_start(portid); if (retval < 0) rte_exit(retval, "Start port %d failed (res=%d)", portid, retval); struct ether_addr addr; rte_eth_macaddr_get(portid, &addr); printf("Port %u MAC: ", portid); PRINT_MAC(addr); printf("\n"); }
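A typical call site for slave_port_init() (pool name and sizes are hypothetical) creates one shared mbuf pool and initializes every detected port before adding it to the bond:

uint16_t portid, nb_ports = rte_eth_dev_count();
struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
		8192 * nb_ports, 250, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
		rte_socket_id());
if (mbuf_pool == NULL)
	rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
for (portid = 0; portid < nb_ports; portid++)
	slave_port_init(portid, mbuf_pool);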
int32_t interfaceSetup(void) { uint8_t portIndex = 0, portCount = rte_eth_dev_count(); int32_t ret = 0, socket_id = -1; struct rte_eth_link link; for (portIndex = 0; portIndex < portCount; portIndex++) { /* fetch the socket id to which the port is mapped */ for (ret = 0; ret < GTP_MAX_NUMANODE; ret++) { if (numaNodeInfo[ret].intfTotal) { if (numaNodeInfo[ret].intfAvail & (1 << portIndex)) { socket_id = ret; break; } } } memset(&link, 0x00, sizeof(struct rte_eth_link)); ret = rte_eth_dev_configure(portIndex, 1, 1, &portConf); if (unlikely(ret < 0)) { rte_panic("ERROR: Dev Configure\n"); return -1; } ret = rte_eth_rx_queue_setup(portIndex, 0, RTE_TEST_RX_DESC_DEFAULT, 0, NULL, numaNodeInfo[socket_id].rx[0]); if (unlikely(ret < 0)) { rte_panic("ERROR: Rx Queue Setup\n"); return -2; } ret = rte_eth_tx_queue_setup(portIndex, 0, RTE_TEST_TX_DESC_DEFAULT, 0, NULL); if (unlikely(ret < 0)) { rte_panic("ERROR: Tx Queue Setup\n"); return -3; } rte_eth_promiscuous_enable(portIndex); rte_eth_dev_start(portIndex); } return 0; }
int rw_piot_rx_queue_setup(rw_piot_api_handle_t api_handle, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle); ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev)); if (NULL == rw_piot_dev) { RW_PIOT_LOG(RTE_LOG_ERR, "PIOT Could not find device by handle\n"); return -1; } return(rte_eth_rx_queue_setup(rw_piot_dev->rte_port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, mp)); }
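Since rw_piot_rx_queue_setup() is a thin wrapper that only translates the API handle into an ethdev port id, it is invoked exactly like rte_eth_rx_queue_setup(). A hypothetical call (handle, pool and sizes assumed, not from the source):

/* Queue 0, 512 descriptors, driver-default rxconf (NULL). */
ret = rw_piot_rx_queue_setup(api_handle, 0, 512, SOCKET_ID_ANY,
			     NULL, mbuf_pool);
if (ret < 0)
	RW_PIOT_LOG(RTE_LOG_ERR, "PIOT RX queue setup failed\n");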
/* * Initialises a given port using global settings and with the rx buffers * coming from the mbuf_pool passed as parameter */ static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool) { struct rte_eth_conf port_conf = port_conf_default; const uint16_t rx_rings = 1, tx_rings = 1; int retval; uint16_t q; if (port >= rte_eth_dev_count()) return -1; retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) return retval; for (q = 0; q < rx_rings; q++) { retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE, rte_eth_dev_socket_id(port), NULL, mbuf_pool); if (retval < 0) return retval; } for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, rte_eth_dev_socket_id(port), NULL); if (retval < 0) return retval; } retval = rte_eth_dev_start(port); if (retval < 0) return retval; struct ether_addr addr; rte_eth_macaddr_get(port, &addr); printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", (unsigned)port, addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]); rte_eth_promiscuous_enable(port); return 0; }
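The usual driver loop for this style of port_init() (loop shape assumed from the surrounding examples) initializes every detected port from a single pool and aborts on the first failure:

uint8_t portid, nb_ports = rte_eth_dev_count();

for (portid = 0; portid < nb_ports; portid++)
	if (port_init(portid, mbuf_pool) != 0)
		rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n", portid);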
static inline int configure_eth_port(uint8_t port_id) { struct ether_addr addr; const uint16_t rxRings = 1, txRings = 1; const uint8_t nb_ports = rte_eth_dev_count(); int ret; uint16_t q; if (port_id >= nb_ports) return -1; ret = rte_eth_dev_configure(port_id, rxRings, txRings, &port_conf_default); if (ret != 0) return ret; for (q = 0; q < rxRings; q++) { ret = rte_eth_rx_queue_setup(port_id, q, RX_DESC_PER_QUEUE, rte_eth_dev_socket_id(port_id), NULL, mbuf_pool); if (ret < 0) return ret; } for (q = 0; q < txRings; q++) { ret = rte_eth_tx_queue_setup(port_id, q, TX_DESC_PER_QUEUE, rte_eth_dev_socket_id(port_id), NULL); if (ret < 0) return ret; } ret = rte_eth_dev_start(port_id); if (ret < 0) return ret; rte_eth_macaddr_get(port_id, &addr); printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", (unsigned)port_id, addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5]); rte_eth_promiscuous_enable(port_id); return 0; }
static int setup_and_bond_ports(struct rte_mempool *mp) { int portid, queueid; int ret; int pl_idx; int nb_queue; nb_queue = rte_lcore_count(); nb_port = rte_eth_dev_count(); memset(lcore_args, 0, sizeof(struct lcore_arg_t) * RTE_MAX_LCORE); for(portid = 0; portid < nb_port; portid++) { ret = rte_eth_dev_configure(portid, nb_queue, nb_queue, &port_conf); if(unlikely(ret < 0)) { rte_exit(EINVAL, "port %d configure failed!\n", portid); } for(queueid = 0; queueid < nb_queue; queueid++) { ret = rte_eth_rx_queue_setup(portid, queueid, NB_RXD, rte_socket_id(), NULL, mp); if(unlikely(ret < 0)) { rte_exit(EINVAL, "port %d rx queue %d setup failed!\n", portid, queueid); } ret = rte_eth_tx_queue_setup(portid, queueid, NB_TXD, rte_socket_id(), NULL); if(unlikely(ret < 0)) { rte_exit(EINVAL, "port %d tx queue %d setup failed!\n", portid, queueid); } pl_idx = lcore_args[queueid].pl_len; lcore_args[queueid].pl[pl_idx].portid = portid; lcore_args[queueid].pl[pl_idx].queueid = queueid; lcore_args[queueid].mp = mp; lcore_args[queueid].pl_len = pl_idx + 1; } ret = rte_eth_dev_start(portid); if(unlikely(ret < 0)) { rte_exit(EINVAL, "port %d start failed!\n", portid); } rte_eth_promiscuous_enable(portid); } return 0; }
void configure_eth_port(uint8_t port_id) { int ret; rte_eth_dev_stop(port_id); ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n", (unsigned) port_id, ret); /* Initialize the port's RX queue */ ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE, rte_eth_dev_socket_id(port_id), NULL, mbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "Failed to setup RX queue on " "port %u (error %d)\n", (unsigned) port_id, ret); /* Initialize the port's TX queue */ ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE, rte_eth_dev_socket_id(port_id), NULL); if (ret < 0) rte_exit(EXIT_FAILURE, "Failed to setup TX queue on " "port %u (error %d)\n", (unsigned) port_id, ret); /* Initialize the port's flow control */ ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Failed to setup hardware flow control on " "port %u (error %d)\n", (unsigned) port_id, ret); /* Start the port */ ret = rte_eth_dev_start(port_id); if (ret < 0) rte_exit(EXIT_FAILURE, "Failed to start port %u (error %d)\n", (unsigned) port_id, ret); /* Put it in promiscuous mode */ rte_eth_promiscuous_enable(port_id); }
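configure_eth_port() above references an fc_conf defined elsewhere. An illustrative definition (the mode and pause time are assumptions, not taken from the source):

static struct rte_eth_fc_conf fc_conf = {
	.mode = RTE_FC_FULL,	/* honor and generate pause frames */
	.pause_time = 0x680,	/* pause quanta, illustrative value */
};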
void init_port(int port_id) { struct rte_eth_dev_info dev_info; int ret; struct rte_eth_link link; rte_eth_dev_info_get(port_id, &dev_info); printf("Name:%s\n\tDriver name: %s\n\tMax rx queues: %d\n\tMax tx queues: %d\n", dev_info.pci_dev->driver->name, dev_info.driver_name, dev_info.max_rx_queues, dev_info.max_tx_queues); printf("\tPCI Address: %04d:%02d:%02x:%01d\n", dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function); ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf); if (ret < 0) rte_panic("Error configuring the port\n"); ret = rte_eth_rx_queue_setup(port_id, 0, RX_QUEUE_SZ, rte_socket_id(), &rx_conf, pktmbuf_pool); if (ret < 0) FATAL_ERROR("Error configuring receiving queue= %d\n", ret); // TODO: Need to check whether it is supported in the VMXNET /*ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, 0, 0); if (ret < 0) FATAL_ERROR("Error configuring receiving queue stats= %d [ENOTSUP= %d]\n", ret, ENOTSUP); */ ret = rte_eth_tx_queue_setup(port_id, 0, TX_QUEUE_SZ, rte_socket_id(), &tx_conf); if (ret < 0) FATAL_ERROR("Error configuring transmitting queue. Errno: %d (%d bad arg, %d no mem)\n", -ret, EINVAL, ENOMEM); /* Start device */ ret = rte_eth_dev_start(port_id); if (ret < 0) FATAL_ERROR("Cannot start port\n"); /* Enable receipt in promiscuous mode for an Ethernet device */ //rte_eth_promiscuous_enable(port_id); /* Print link status */ rte_eth_link_get_nowait(port_id, &link); if (link.link_status) printf("\tPort %d Link Up - speed %u Mbps - %s\n", (uint8_t)port_id, (unsigned)link.link_speed, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex")); else printf("\tPort %d Link Down\n", (uint8_t)port_id); }
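init_port() above passes explicit rx_conf/tx_conf structures. A plausible shape, with threshold values that are illustrative defaults rather than tuned recommendations:

static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
	.rx_free_thresh = 32,	/* free RX descriptors in batches of 32 */
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = { .pthresh = 36, .hthresh = 0, .wthresh = 0 },
	.tx_free_thresh = 0,	/* 0 selects the driver default */
	.tx_rs_thresh = 0,
};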
int rumpcomp_virtif_create(int devnum, struct virtif_user **viup) { struct rte_eth_conf portconf; struct rte_eth_link link; int rv = EINVAL; /* XXX: not very accurate ;) */ /* this is here only for simplicity */ if (globalinit() != 0) goto out; memset(&portconf, 0, sizeof(portconf)); if (rte_eth_dev_configure(IF_PORTID, NQUEUE, NQUEUE, &portconf) < 0) OUT("configure device\n"); if (rte_eth_rx_queue_setup(IF_PORTID, 0, NDESC, 0, &rxconf, mbpool) <0) OUT("rx queue setup\n"); if (rte_eth_tx_queue_setup(IF_PORTID, 0, NDESC, 0, &txconf) < 0) OUT("tx queue setup\n"); if (rte_eth_dev_start(IF_PORTID) < 0) OUT("device start\n"); rte_eth_link_get(IF_PORTID, &link); if (!link.link_status) { printf("warning: virt link down\n"); } rte_eth_promiscuous_enable(IF_PORTID); rv = 0; out: *viup = NULL; /* not used by the driver in its current state */ return rv; }
/** * @brief Initialize device RX queue * * @param devId uint8_t, ID of DPDK device * @param queueId uint16_t, RX queue ID to initialize * @param desc uint16_t, number of RX descriptors for the queue * * @return 0 on success, or a DPDK error code otherwise */ int DPDKAdapter::initRxQueue(uint8_t devId, uint16_t queueId, uint16_t desc) { if(devId >= RTE_MAX_ETHPORTS) { qCritical("Device ID is out of range"); return -1; } qDebug("Initializing RX queue %d of device %d", queueId, devId); struct rte_eth_rxconf rxConf; memset(&rxConf, 0, sizeof(rte_eth_rxconf)); // Set default RX queue params rxConf.rx_free_thresh = DPDK_RX_FREE_THRESH; rxConf.rx_thresh.pthresh = DPDK_RX_PTHRESH; rxConf.rx_thresh.hthresh = DPDK_RX_HTHRESH; rxConf.rx_thresh.wthresh = DPDK_RX_WTHRESH; MemPool_t* mp = findMPool(devId, RX_); int ret = rte_eth_rx_queue_setup(devId, queueId, desc, SOCKET_ID_ANY, &rxConf, mp); if(ret < 0) { qCritical("RX Queue setup error: err=%d, dev=%u", ret, devId); } return ret; }
/* Setup ethdev hardware queues */ static int dpdk_ethdev_queues_setup(struct vr_dpdk_ethdev *ethdev) { int ret, i; uint8_t port_id = ethdev->ethdev_port_id; struct rte_mempool *mempool; /* configure RX queues */ RTE_LOG(DEBUG, VROUTER, "%s: nb_rx_queues=%u nb_tx_queues=%u\n", __func__, (unsigned)ethdev->ethdev_nb_rx_queues, (unsigned)ethdev->ethdev_nb_tx_queues); for (i = 0; i < VR_DPDK_MAX_NB_RX_QUEUES; i++) { if (i < ethdev->ethdev_nb_rss_queues) { mempool = vr_dpdk.rss_mempool; ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_RSS_STATE; } else if (i < ethdev->ethdev_nb_rx_queues) { if (vr_dpdk.nb_free_mempools == 0) { RTE_LOG(ERR, VROUTER, " error assigning mempool to eth device %" PRIu8 " RX queue %d\n", port_id, i); return -ENOMEM; } vr_dpdk.nb_free_mempools--; mempool = vr_dpdk.free_mempools[vr_dpdk.nb_free_mempools]; ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_READY_STATE; } else { ethdev->ethdev_queue_states[i] = VR_DPDK_QUEUE_NONE; continue; } ret = rte_eth_rx_queue_setup(port_id, i, VR_DPDK_NB_RXD, SOCKET_ID_ANY, &rx_queue_conf, mempool); if (ret < 0) { /* return mempool to the list */ if (mempool != vr_dpdk.rss_mempool) vr_dpdk.nb_free_mempools++; RTE_LOG(ERR, VROUTER, " error setting up eth device %" PRIu8 " RX queue %d" ": %s (%d)\n", port_id, i, rte_strerror(-ret), -ret); return ret; } /* map RX queue to stats counter ignoring any errors */ rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i); /* save queue mempool pointer */ ethdev->ethdev_mempools[i] = mempool; } i = ethdev->ethdev_nb_rx_queues - ethdev->ethdev_nb_rss_queues; RTE_LOG(INFO, VROUTER, " setup %d RSS queue(s) and %d filtering queue(s)\n", (int)ethdev->ethdev_nb_rss_queues, i); /* configure TX queues */ for (i = 0; i < ethdev->ethdev_nb_tx_queues; i++) { ret = rte_eth_tx_queue_setup(port_id, i, VR_DPDK_NB_TXD, SOCKET_ID_ANY, &tx_queue_conf); if (ret < 0) { RTE_LOG(ERR, VROUTER, " error setting up eth device %" PRIu8 " TX queue %d" ": %s (%d)\n", port_id, i, rte_strerror(-ret), -ret); return ret; } /* map TX queue to stats counter ignoring any errors */ rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i); } return 0; }
static void app_init_nics(void) { unsigned socket; uint32_t lcore; uint16_t port; uint8_t queue; int ret; uint32_t n_rx_queues, n_tx_queues; /* Init NIC ports and queues, then start the ports */ for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { struct rte_mempool *pool; uint16_t nic_rx_ring_size; uint16_t nic_tx_ring_size; struct rte_eth_rxconf rxq_conf; struct rte_eth_txconf txq_conf; struct rte_eth_dev_info dev_info; struct rte_eth_conf local_port_conf = port_conf; n_rx_queues = app_get_nic_rx_queues_per_port(port); n_tx_queues = app.nic_tx_port_mask[port]; if ((n_rx_queues == 0) && (n_tx_queues == 0)) { continue; } /* Init port */ printf("Initializing NIC port %u ...\n", port); rte_eth_dev_info_get(port, &dev_info); if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) local_port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; local_port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads; if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != port_conf.rx_adv_conf.rss_conf.rss_hf) { printf("Port %u modified RSS hash function based on hardware support," "requested:%#"PRIx64" configured:%#"PRIx64"\n", port, port_conf.rx_adv_conf.rss_conf.rss_hf, local_port_conf.rx_adv_conf.rss_conf.rss_hf); } ret = rte_eth_dev_configure( port, (uint8_t) n_rx_queues, (uint8_t) n_tx_queues, &local_port_conf); if (ret < 0) { rte_panic("Cannot init NIC port %u (%d)\n", port, ret); } rte_eth_promiscuous_enable(port); nic_rx_ring_size = app.nic_rx_ring_size; nic_tx_ring_size = app.nic_tx_ring_size; ret = rte_eth_dev_adjust_nb_rx_tx_desc( port, &nic_rx_ring_size, &nic_tx_ring_size); if (ret < 0) { rte_panic("Cannot adjust number of descriptors for port %u (%d)\n", port, ret); } app.nic_rx_ring_size = nic_rx_ring_size; app.nic_tx_ring_size = nic_tx_ring_size; rxq_conf = dev_info.default_rxconf; rxq_conf.offloads = local_port_conf.rxmode.offloads; /* Init RX queues */ for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { if (app.nic_rx_queue_mask[port][queue] == 0) { continue; } app_get_lcore_for_nic_rx(port, queue, &lcore); socket = rte_lcore_to_socket_id(lcore); pool = app.lcore_params[lcore].pool; printf("Initializing NIC port %u RX queue %u ...\n", port, queue); ret = rte_eth_rx_queue_setup( port, queue, (uint16_t) app.nic_rx_ring_size, socket, &rxq_conf, pool); if (ret < 0) { rte_panic("Cannot init RX queue %u for port %u (%d)\n", queue, port, ret); } } txq_conf = dev_info.default_txconf; txq_conf.offloads = local_port_conf.txmode.offloads; /* Init TX queues */ if (app.nic_tx_port_mask[port] == 1) { app_get_lcore_for_nic_tx(port, &lcore); socket = rte_lcore_to_socket_id(lcore); printf("Initializing NIC port %u TX queue 0 ...\n", port); ret = rte_eth_tx_queue_setup( port, 0, (uint16_t) app.nic_tx_ring_size, socket, &txq_conf); if (ret < 0) { rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, ret); } } /* Start port */ ret = rte_eth_dev_start(port); if (ret < 0) { rte_panic("Cannot start port %d (%d)\n", port, ret); } } check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0)); }
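app_init_nics() finishes by calling check_all_ports_link_status(). A simplified sketch of that well-known DPDK example helper (the upstream version additionally polls with retries for about nine seconds before reporting):

static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
	struct rte_eth_link link;
	uint16_t portid;

	for (portid = 0; portid < port_num; portid++) {
		if ((port_mask & (1 << portid)) == 0)
			continue;
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(portid, &link);
		if (link.link_status)
			printf("Port %u Link Up - speed %u Mbps\n",
					portid, (unsigned)link.link_speed);
		else
			printf("Port %u Link Down\n", portid);
	}
}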
void pktgen_config_ports(void) { uint32_t lid, pid, i, s, q, sid; rxtx_t rt; pkt_seq_t * pkt; port_info_t * info; char buff[RTE_MEMZONE_NAMESIZE]; int32_t ret, cache_size; char output_buff[256] = { 0 }; // Find out the total number of ports in the system. // We have already blacklisted the ones we needed to in the main routine. pktgen.nb_ports = rte_eth_dev_count(); if (pktgen.nb_ports > RTE_MAX_ETHPORTS) pktgen.nb_ports = RTE_MAX_ETHPORTS; if ( pktgen.nb_ports == 0 ) pktgen_log_panic("*** Did not find any ports to use ***"); pktgen.starting_port = 0; // Setup the number of ports to display at a time if ( pktgen.nb_ports > pktgen.nb_ports_per_page ) pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports_per_page; else pktgen.ending_port = pktgen.starting_port + pktgen.nb_ports; wr_port_matrix_dump(pktgen.l2p); pktgen_log_info("Configuring %d ports, MBUF Size %d, MBUF Cache Size %d", pktgen.nb_ports, MBUF_SIZE, MBUF_CACHE_SIZE); // For each lcore setup each port that is handled by that lcore. for(lid = 0; lid < RTE_MAX_LCORE; lid++) { if ( wr_get_map(pktgen.l2p, RTE_MAX_ETHPORTS, lid) == 0 ) continue; // For each port attached or handled by the lcore for(pid = 0; pid < pktgen.nb_ports; pid++) { // If non-zero then this port is handled by this lcore. if ( wr_get_map(pktgen.l2p, pid, lid) == 0 ) continue; wr_set_port_private(pktgen.l2p, pid, &pktgen.info[pid]); pktgen.info[pid].pid = pid; } } wr_dump_l2p(pktgen.l2p); pktgen.total_mem_used = 0; for(pid = 0; pid < pktgen.nb_ports; pid++) { // Skip if we do not have any lcores attached to a port. if ( (rt.rxtx = wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE)) == 0 ) continue; pktgen.port_cnt++; snprintf(output_buff, sizeof(output_buff), "Initialize Port %d -- TxQ %d, RxQ %d", pid, rt.tx, rt.rx); info = wr_get_port_private(pktgen.l2p, pid); // Create the pkt header structures for transmitting sequence of packets. snprintf(buff, sizeof(buff), "seq_hdr_%d", pid); info->seq_pkt = (pkt_seq_t *)rte_zmalloc(buff, (sizeof(pkt_seq_t) * NUM_TOTAL_PKTS), RTE_CACHE_LINE_SIZE); if ( info->seq_pkt == NULL ) pktgen_log_panic("Unable to allocate %d pkt_seq_t headers", NUM_TOTAL_PKTS); info->seqIdx = 0; info->seqCnt = 0; info->nb_mbufs = MAX_MBUFS_PER_PORT; cache_size = (info->nb_mbufs > RTE_MEMPOOL_CACHE_MAX_SIZE) ? RTE_MEMPOOL_CACHE_MAX_SIZE : info->nb_mbufs; pktgen_port_conf_setup(pid, &rt, &default_port_conf); if ( (ret = rte_eth_dev_configure(pid, rt.rx, rt.tx, &info->port_conf)) < 0) pktgen_log_panic("Cannot configure device: port=%d, Num queues %d,%d (%d)%s", pid, rt.rx, rt.tx, errno, rte_strerror(-ret)); pkt = &info->seq_pkt[SINGLE_PKT]; // Grab the source MAC address rte_eth_macaddr_get(pid, &pkt->eth_src_addr); pktgen_log_info("%s, Src MAC %02x:%02x:%02x:%02x:%02x:%02x", output_buff, pkt->eth_src_addr.addr_bytes[0], pkt->eth_src_addr.addr_bytes[1], pkt->eth_src_addr.addr_bytes[2], pkt->eth_src_addr.addr_bytes[3], pkt->eth_src_addr.addr_bytes[4], pkt->eth_src_addr.addr_bytes[5]); // Copy the first Src MAC address in SINGLE_PKT to the rest of the sequence packets. for (i = 0; i < NUM_SEQ_PKTS; i++) ethAddrCopy( &info->seq_pkt[i].eth_src_addr, &pkt->eth_src_addr ); pktgen.mem_used = 0; for(q = 0; q < rt.rx; q++) { // grab the socket id value based on the lcore being used. sid = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q)); // Create and initialize the default Receive buffers.
info->q[q].rx_mp = pktgen_mbuf_pool_create("Default RX", pid, q, info->nb_mbufs, sid, cache_size); if ( info->q[q].rx_mp == NULL ) pktgen_log_panic("Cannot init port %d for Default RX mbufs", pid); ret = rte_eth_rx_queue_setup(pid, q, pktgen.nb_rxd, sid, &info->rx_conf, pktgen.info[pid].q[q].rx_mp); if (ret < 0) pktgen_log_panic("rte_eth_rx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret)); } pktgen_log_info(""); for(q = 0; q < rt.tx; q++) { // grab the socket id value based on the lcore being used. sid = rte_lcore_to_socket_id(wr_get_port_lid(pktgen.l2p, pid, q)); // Create and initialize the default Transmit buffers. info->q[q].tx_mp = pktgen_mbuf_pool_create("Default TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size); if ( info->q[q].tx_mp == NULL ) pktgen_log_panic("Cannot init port %d for Default TX mbufs", pid); // Create and initialize the range Transmit buffers. info->q[q].range_mp = pktgen_mbuf_pool_create("Range TX", pid, q, MAX_MBUFS_PER_PORT, sid, 0); if ( info->q[q].range_mp == NULL ) pktgen_log_panic("Cannot init port %d for Range TX mbufs", pid); // Create and initialize the sequence Transmit buffers. info->q[q].seq_mp = pktgen_mbuf_pool_create("Sequence TX", pid, q, MAX_MBUFS_PER_PORT, sid, cache_size); if ( info->q[q].seq_mp == NULL ) pktgen_log_panic("Cannot init port %d for Sequence TX mbufs", pid); // Used for sending special packets like ARP requests info->q[q].special_mp = pktgen_mbuf_pool_create("Special TX", pid, q, MAX_SPECIAL_MBUFS, sid, cache_size); if (info->q[q].special_mp == NULL) pktgen_log_panic("Cannot init port %d for Special TX mbufs", pid); // Setup the PCAP file for each port if ( pktgen.info[pid].pcap != NULL ) { if ( pktgen_pcap_parse(pktgen.info[pid].pcap, info, q) == -1 ) pktgen_log_panic("Cannot load PCAP file for port %d", pid); } // Find out the link speed to program the WTHRESH value correctly. pktgen_get_link_status(info, pid, 0); //info->tx_conf.tx_thresh.wthresh = (info->link.link_speed == 1000)? TX_WTHRESH_1GB : TX_WTHRESH; ret = rte_eth_tx_queue_setup(pid, q, pktgen.nb_txd, sid, &info->tx_conf); if (ret < 0) pktgen_log_panic("rte_eth_tx_queue_setup: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret)); #if 0 ret = rte_eth_dev_flow_ctrl_set(pid, &fc_conf); if (ret < 0) pktgen_log_panic("rte_eth_dev_flow_ctrl_set: err=%d, port=%d, %s", ret, pid, rte_strerror(-ret)); #endif pktgen_log_info(""); } pktgen_log_info("%*sPort memory used = %6lu KB", 71, " ", (pktgen.mem_used + 1023)/1024); } pktgen_log_info("%*sTotal memory used = %6lu KB", 70, " ", (pktgen.total_mem_used + 1023)/1024); // Start up the ports and display the port Link status for(pid = 0; pid < pktgen.nb_ports; pid++) { if ( wr_get_map(pktgen.l2p, pid, RTE_MAX_LCORE) == 0 ) continue; info = wr_get_port_private(pktgen.l2p, pid); /* Start device */ if ( (ret = rte_eth_dev_start(pid)) < 0 ) pktgen_log_panic("rte_eth_dev_start: port=%d, %s", pid, rte_strerror(-ret)); pktgen_get_link_status(info, pid, 1); if (info->link.link_status) { snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Up - speed %u Mbps - %s", pid, (uint32_t) info->link.link_speed, (info->link.link_duplex == ETH_LINK_FULL_DUPLEX) ? ("full-duplex") : ("half-duplex")); } else snprintf(output_buff, sizeof(output_buff), "Port %2d: Link Down", pid); // If enabled, put device in promiscuous mode. 
if (pktgen.flags & PROMISCUOUS_ON_FLAG) { strncatf(output_buff, " <Enable promiscuous mode>"); rte_eth_promiscuous_enable(pid); } pktgen_log_info("%s", output_buff); pktgen.info[pid].seq_pkt[SINGLE_PKT].pktSize = MIN_PKT_SIZE; // Setup the port and packet defaults. (must be after link speed is found) for (s = 0; s < NUM_TOTAL_PKTS; s++) pktgen_port_defaults(pid, s); pktgen_range_setup(info); pktgen_rnd_bits_init(&pktgen.info[pid].rnd_bitfields); } pktgen_log_info(""); for (sid = 0; sid < wr_coremap_cnt(pktgen.core_info, pktgen.core_cnt, 0); sid++) pktgen_packet_capture_init(&pktgen.capture[sid], sid); }
/********************************************************************** *@description: initialize all enabled DPDK ports, set up one TX queue per (lcore, port) pair, create the mbuf pool and set up the per-lcore RX queues. * *@parameters: * [in]: nb_ports, number of Ethernet devices detected * [in]: user_conf / lcore_conf, application and per-lcore configuration * *@return values: 0 on success (exits on fatal error) **********************************************************************/ static int odp_init_ports(unsigned short nb_ports, struct odp_user_config *user_conf, struct odp_lcore_config *lcore_conf) { int ret; uint8_t portid; uint16_t queueid; unsigned lcore_id; uint8_t nb_rx_queue = 0; uint8_t max_rx_queue = 0; uint8_t queue, socketid; uint32_t n_tx_queue, nb_lcores, nb_mbuf; struct ether_addr eth_addr; struct rte_eth_dev_info dev_info; struct rte_eth_txconf *txconf; nb_lcores = rte_lcore_count(); n_tx_queue = nb_lcores; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; printf("\nStarting port init\n"); /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { /* skip ports that are not enabled */ if ((user_conf->port_mask & (1 << portid)) == 0) { printf("\nSkipping disabled port %d\n", portid); continue; } /* init port */ printf("\t port %d:\n", portid); nb_rx_queue = odp_get_port_rx_queues_nb(portid, user_conf); if (max_rx_queue < nb_rx_queue) max_rx_queue = nb_rx_queue; printf("\t Creating queues: rx queue number=%d tx queue number=%u... \n", nb_rx_queue, (unsigned)n_tx_queue); ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &odp_port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", ret, portid); rte_eth_macaddr_get(portid, &eth_addr); printf("\t MAC Address:%02X:%02X:%02X:%02X:%02X:%02X \n", eth_addr.addr_bytes[0], eth_addr.addr_bytes[1], eth_addr.addr_bytes[2], eth_addr.addr_bytes[3], eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]); /* init one TX queue per (lcore, port) pair */ queueid = 0; for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { if (rte_lcore_is_enabled(lcore_id) == 0) continue; if (user_conf->numa_on) socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); else socketid = 0; printf("\t lcore id:%u, tx queue id:%d, socket id:%d \n", lcore_id, queueid, socketid); ret = rte_eth_tx_queue_setup(portid, queueid, ODP_TX_DESC_DEFAULT, socketid, &odp_tx_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " "port=%d\n", ret, portid); lcore_conf[lcore_id].tx_queue_id[portid] = queueid; queueid++; } printf("\n"); } nb_mbuf = RTE_MAX((nb_ports*nb_rx_queue*ODP_RX_DESC_DEFAULT + nb_ports*nb_lcores*MAX_PKT_BURST + nb_ports*n_tx_queue*ODP_TX_DESC_DEFAULT + nb_lcores*MEMPOOL_CACHE_SIZE), (unsigned)8192); /* init memory */ ret = odp_init_mbuf_pool(nb_mbuf, user_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "init_mem failed\n"); for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { if (rte_lcore_is_enabled(lcore_id) == 0) continue; printf("\nInitializing rx queues on lcore %u ... \n", lcore_id); /* init RX queues */ for (queue = 0; queue < lcore_conf[lcore_id].n_rx_queue; ++queue) { portid = lcore_conf[lcore_id].rx_queue_list[queue].port_id; queueid = lcore_conf[lcore_id].rx_queue_list[queue].queue_id; if (user_conf->numa_on) socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); else socketid = 0; printf("port id:%d, rx queue id: %d, socket id:%d \n", portid, queueid, socketid); ret = rte_eth_rx_queue_setup(portid, queueid, ODP_RX_DESC_DEFAULT, socketid, &odp_rx_conf, odp_pktmbuf_pool[socketid]); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," "port=%d\n", ret, portid); } } return 0; }
static inline int app_link_filter_arp_add(struct app_link_params *link) { struct rte_eth_ethertype_filter filter = { .ether_type = ETHER_TYPE_ARP, .flags = 0, .queue = link->arp_q, }; return rte_eth_dev_filter_ctrl(link->pmd_id, RTE_ETH_FILTER_ETHERTYPE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_tcp_syn_add(struct app_link_params *link) { struct rte_eth_syn_filter filter = { .hig_pri = 1, .queue = link->tcp_syn_q, }; return rte_eth_dev_filter_ctrl(link->pmd_id, RTE_ETH_FILTER_SYN, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = 0, .proto_mask = 0, /* Disable */ .tcp_flags = 0, .priority = 1, /* Lowest */ .queue = l1->ip_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = 0, .proto_mask = 0, /* Disable */ .tcp_flags = 0, .priority = 1, /* Lowest */ .queue = l1->ip_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_TCP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->tcp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_TCP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->tcp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_UDP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->udp_local_q, }; return 
rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_UDP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->udp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_SCTP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->sctp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_SCTP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->sctp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static void app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp) { if (cp->arp_q != 0) { int status = app_link_filter_arp_add(cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding ARP filter (queue = %" PRIu32 ")", cp->name, cp->pmd_id, cp->arp_q); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding ARP filter " "(queue = %" PRIu32 ") (%" PRId32 ")\n", cp->name, cp->pmd_id, cp->arp_q, status); } } static void app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp) { if (cp->tcp_syn_q != 0) { int status = app_link_filter_tcp_syn_add(cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding TCP SYN filter (queue = %" PRIu32 ")", cp->name, cp->pmd_id, cp->tcp_syn_q); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding TCP SYN filter " "(queue = %" PRIu32 ") (%" PRId32 ")\n", cp->name, cp->pmd_id, cp->tcp_syn_q, status); } } void app_link_up_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; int status; /* For each link, add filters for IP of current link */ if (cp->ip != 0) { for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; /* IP */ if (p->ip_local_q != 0) { int status = app_link_filter_ip_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding IP filter (queue= %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->ip_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding IP " "filter (queue= %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->ip_local_q, cp->ip, status); } /* 
TCP */ if (p->tcp_local_q != 0) { int status = app_link_filter_tcp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding TCP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->tcp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding TCP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->tcp_local_q, cp->ip, status); } /* UDP */ if (p->udp_local_q != 0) { int status = app_link_filter_udp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding UDP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->udp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding UDP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->udp_local_q, cp->ip, status); } /* SCTP */ if (p->sctp_local_q != 0) { int status = app_link_filter_sctp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Adding SCTP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->sctp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding SCTP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->sctp_local_q, cp->ip, status); } } } /* PMD link up */ status = rte_eth_dev_set_link_up(cp->pmd_id); /* Do not panic if PMD does not provide link up functionality */ if (status < 0 && status != -ENOTSUP) rte_panic("%s (%" PRIu32 "): PMD set link up error %" PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as UP */ cp->state = 1; } void app_link_down_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; int status; /* PMD link down */ status = rte_eth_dev_set_link_down(cp->pmd_id); /* Do not panic if PMD does not provide link down functionality */ if (status < 0 && status != -ENOTSUP) rte_panic("%s (%" PRIu32 "): PMD set link down error %" PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as DOWN */ cp->state = 0; /* Return if current link IP is not valid */ if (cp->ip == 0) return; /* For each link, remove filters for IP of current link */ for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; /* IP */ if (p->ip_local_q != 0) { int status = app_link_filter_ip_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting IP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->ip_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting IP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->ip_local_q, cp->ip, status); } /* TCP */ if (p->tcp_local_q != 0) { int status = app_link_filter_tcp_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting TCP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->tcp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting TCP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->tcp_local_q, cp->ip, status); } /* UDP */ if (p->udp_local_q != 0) { int status = app_link_filter_udp_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting UDP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->udp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting UDP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->udp_local_q, cp->ip, status); } /* SCTP */ if (p->sctp_local_q != 0) { int status = app_link_filter_sctp_del(p, cp); 
APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting SCTP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->sctp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting SCTP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->sctp_local_q, cp->ip, status); } } } static void app_check_link(struct app_params *app) { uint32_t all_links_up, i; all_links_up = 1; for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; struct rte_eth_link link_params; memset(&link_params, 0, sizeof(link_params)); rte_eth_link_get(p->pmd_id, &link_params); APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s", p->name, p->pmd_id, link_params.link_speed / 1000, link_params.link_status ? "UP" : "DOWN"); if (link_params.link_status == ETH_LINK_DOWN) all_links_up = 0; } if (all_links_up == 0) rte_panic("Some links are DOWN\n"); } static uint32_t is_any_swq_frag_or_ras(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_swq; i++) { struct app_pktq_swq_params *p = &app->swq_params[i]; if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) || (p->ipv4_ras == 1) || (p->ipv6_ras == 1)) return 1; } return 0; } static void app_init_link_frag_ras(struct app_params *app) { uint32_t i; if (is_any_swq_frag_or_ras(app)) { for (i = 0; i < app->n_pktq_hwq_out; i++) { struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i]; p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS; } } } static inline int app_get_cpu_socket_id(uint32_t pmd_id) { int status = rte_eth_dev_socket_id(pmd_id); return (status != SOCKET_ID_ANY) ? status : 0; } static inline int app_link_rss_enabled(struct app_link_params *cp) { return (cp->n_rss_qs) ? 1 : 0; } static void app_link_rss_setup(struct app_link_params *cp) { struct rte_eth_dev_info dev_info; struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX]; uint32_t i; int status; /* Get RETA size */ memset(&dev_info, 0, sizeof(dev_info)); rte_eth_dev_info_get(cp->pmd_id, &dev_info); if (dev_info.reta_size == 0) rte_panic("%s (%u): RSS setup error (null RETA size)\n", cp->name, cp->pmd_id); if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) rte_panic("%s (%u): RSS setup error (RETA size too big)\n", cp->name, cp->pmd_id); /* Setup RETA contents */ memset(reta_conf, 0, sizeof(reta_conf)); for (i = 0; i < dev_info.reta_size; i++) reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; for (i = 0; i < dev_info.reta_size; i++) { uint32_t reta_id = i / RTE_RETA_GROUP_SIZE; uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE; uint32_t rss_qs_pos = i % cp->n_rss_qs; reta_conf[reta_id].reta[reta_pos] = (uint16_t) cp->rss_qs[rss_qs_pos]; } /* RETA update */ status = rte_eth_dev_rss_reta_update(cp->pmd_id, reta_conf, dev_info.reta_size); if (status != 0) rte_panic("%s (%u): RSS setup error (RETA update failed)\n", cp->name, cp->pmd_id); } static void app_init_link_set_config(struct app_link_params *p) { if (p->n_rss_qs) { p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS; p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 | p->rss_proto_ipv6 | p->rss_proto_l2; } } static void app_init_link(struct app_params *app) { uint32_t i; app_init_link_frag_ras(app); for (i = 0; i < app->n_links; i++) { struct app_link_params *p_link = &app->link_params[i]; uint32_t link_id, n_hwq_in, n_hwq_out, j; int status; sscanf(p_link->name, "LINK%" PRIu32, &link_id); n_hwq_in = app_link_get_n_rxq(app, p_link); n_hwq_out = app_link_get_n_txq(app, p_link); app_init_link_set_config(p_link); APP_LOG(app, HIGH, "Initializing %s (%" 
PRIu32") " "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...", p_link->name, p_link->pmd_id, n_hwq_in, n_hwq_out); /* LINK */ status = rte_eth_dev_configure( p_link->pmd_id, n_hwq_in, n_hwq_out, &p_link->conf); if (status < 0) rte_panic("%s (%" PRId32 "): " "init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, status); rte_eth_macaddr_get(p_link->pmd_id, (struct ether_addr *) &p_link->mac_addr); if (p_link->promisc) rte_eth_promiscuous_enable(p_link->pmd_id); /* RXQ */ for (j = 0; j < app->n_pktq_hwq_in; j++) { struct app_pktq_hwq_in_params *p_rxq = &app->hwq_in_params[j]; uint32_t rxq_link_id, rxq_queue_id; uint16_t nb_rxd = p_rxq->size; sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32, &rxq_link_id, &rxq_queue_id); if (rxq_link_id != link_id) continue; status = rte_eth_dev_adjust_nb_rx_tx_desc( p_link->pmd_id, &nb_rxd, NULL); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s adjust number of Rx descriptors " "error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_rxq->name, status); status = rte_eth_rx_queue_setup( p_link->pmd_id, rxq_queue_id, nb_rxd, app_get_cpu_socket_id(p_link->pmd_id), &p_rxq->conf, app->mempool[p_rxq->mempool_id]); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_rxq->name, status); } /* TXQ */ for (j = 0; j < app->n_pktq_hwq_out; j++) { struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[j]; uint32_t txq_link_id, txq_queue_id; uint16_t nb_txd = p_txq->size; sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32, &txq_link_id, &txq_queue_id); if (txq_link_id != link_id) continue; status = rte_eth_dev_adjust_nb_rx_tx_desc( p_link->pmd_id, NULL, &nb_txd); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s adjust number of Tx descriptors " "error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_txq->name, status); status = rte_eth_tx_queue_setup( p_link->pmd_id, txq_queue_id, nb_txd, app_get_cpu_socket_id(p_link->pmd_id), &p_txq->conf); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_txq->name, status); } /* LINK START */ status = rte_eth_dev_start(p_link->pmd_id); if (status < 0) rte_panic("Cannot start %s (error %" PRId32 ")\n", p_link->name, status); /* LINK FILTERS */ app_link_set_arp_filter(app, p_link); app_link_set_tcp_syn_filter(app, p_link); if (app_link_rss_enabled(p_link)) app_link_rss_setup(p_link); /* LINK UP */ app_link_up_internal(app, p_link); } app_check_link(app); } static void app_init_swq(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_swq; i++) { struct app_pktq_swq_params *p = &app->swq_params[i]; unsigned flags = 0; if (app_swq_get_readers(app, p) == 1) flags |= RING_F_SC_DEQ; if (app_swq_get_writers(app, p) == 1) flags |= RING_F_SP_ENQ; APP_LOG(app, HIGH, "Initializing %s...", p->name); app->swq[i] = rte_ring_create( p->name, p->size, p->cpu_socket_id, flags); if (app->swq[i] == NULL) rte_panic("%s init error\n", p->name); } } static void app_init_tm(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_tm; i++) { struct app_pktq_tm_params *p_tm = &app->tm_params[i]; struct app_link_params *p_link; struct rte_eth_link link_eth_params; struct rte_sched_port *sched; uint32_t n_subports, subport_id; int status; p_link = app_get_link_for_tm(app, p_tm); /* LINK */ rte_eth_link_get(p_link->pmd_id, &link_eth_params); /* TM */ p_tm->sched_port_params.name = p_tm->name; p_tm->sched_port_params.socket = app_get_cpu_socket_id(p_link->pmd_id); p_tm->sched_port_params.rate = (uint64_t) 
link_eth_params.link_speed * 1000 * 1000 / 8; APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name); sched = rte_sched_port_config(&p_tm->sched_port_params); if (sched == NULL) rte_panic("%s init error\n", p_tm->name); app->tm[i] = sched; /* Subport */ n_subports = p_tm->sched_port_params.n_subports_per_port; for (subport_id = 0; subport_id < n_subports; subport_id++) { uint32_t n_pipes_per_subport, pipe_id; status = rte_sched_subport_config(sched, subport_id, &p_tm->sched_subport_params[subport_id]); if (status) rte_panic("%s subport %" PRIu32 " init error (%" PRId32 ")\n", p_tm->name, subport_id, status); /* Pipe */ n_pipes_per_subport = p_tm->sched_port_params.n_pipes_per_subport; for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) { int profile_id = p_tm->sched_pipe_to_profile[ subport_id * APP_MAX_SCHED_PIPES + pipe_id]; if (profile_id == -1) continue; status = rte_sched_pipe_config(sched, subport_id, pipe_id, profile_id); if (status) rte_panic("%s subport %" PRIu32 " pipe %" PRIu32 " (profile %" PRId32 ") " "init error (% " PRId32 ")\n", p_tm->name, subport_id, pipe_id, profile_id, status); } } } } #ifndef RTE_EXEC_ENV_LINUXAPP static void app_init_tap(struct app_params *app) { if (app->n_pktq_tap == 0) return; rte_panic("TAP device not supported.\n"); } #else static void app_init_tap(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_tap; i++) { struct app_pktq_tap_params *p_tap = &app->tap_params[i]; struct ifreq ifr; int fd, status; APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name); fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK); if (fd < 0) rte_panic("Cannot open file /dev/net/tun\n"); memset(&ifr, 0, sizeof(ifr)); ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name); status = ioctl(fd, TUNSETIFF, (void *) &ifr); if (status < 0) rte_panic("TAP setup error\n"); app->tap[i] = fd; } }
#endif /* RTE_EXEC_ENV_LINUXAPP */
int32_t interfaceSetup(void) { uint8_t portIndex = 0, portCount = rte_eth_dev_count(); int32_t ret = 0, socket_id = -1; struct rte_eth_link link; for (portIndex = 0; portIndex < portCount; portIndex++) { /* fetch the socket id to which the port is mapped */ for (ret = 0; ret < MAX_NUMANODE; ret++) { if (numaNodeInfo[ret].intfTotal) { if (numaNodeInfo[ret].intfAvail & (1 << portIndex)) { socket_id = ret; break; } } } memset(&link, 0x00, sizeof(struct rte_eth_link)); ret = rte_eth_dev_configure(portIndex, 1, 1, &portConf); if (unlikely(ret < 0)) { rte_panic("ERROR: Dev Configure\n"); return -1; } ret = rte_eth_rx_queue_setup(portIndex, 0, RTE_TEST_RX_DESC_DEFAULT, 0, NULL, numaNodeInfo[socket_id].rx[0]); if (unlikely(ret < 0)) { rte_panic("ERROR: Rx Queue Setup\n"); return -2; } ret = rte_eth_tx_queue_setup(portIndex, 0, RTE_TEST_TX_DESC_DEFAULT, 0, NULL); if (unlikely(ret < 0)) { rte_panic("ERROR: Tx Queue Setup\n"); return -3; } rte_eth_link_get(portIndex, &link); if (unlikely(link.link_duplex != ETH_LINK_FULL_DUPLEX)) { printf(" port:%u; duplex:%s, status:%s", (unsigned) portIndex, (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? "full" : "half", (link.link_status == 1) ? "up" : "down"); /* return -1; NOTE: the link may legitimately be down if the interface is not connected or the speeds do not match */ } rte_eth_promiscuous_enable(portIndex); rte_eth_dev_start(portIndex); } return 0; }
int rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf) { int32_t ret; uint16_t i; uint16_t rx_slots, tx_slots; if (conf == NULL || portid >= RTE_DIM(ports) || conf->nr_tx_rings > netmap.conf.max_rings || conf->nr_rx_rings > netmap.conf.max_rings) { RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n", __func__, portid); return (-EINVAL); } rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots); tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots); if (tx_slots > netmap.conf.max_slots || rx_slots > netmap.conf.max_slots) { RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n", __func__, portid); return (-EINVAL); } ret = rte_eth_dev_configure(portid, conf->nr_rx_rings, conf->nr_tx_rings, conf->eth_conf); if (ret < 0) { RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid); return (ret); } for (i = 0; i < conf->nr_tx_rings; i++) { ret = rte_eth_tx_queue_setup(portid, i, tx_slots, conf->socket_id, conf->tx_conf); if (ret < 0) { RTE_LOG(ERR, USER1, "Couldn't configure TX queue %"PRIu16" of " "port %"PRIu8"\n", i, portid); return (ret); } ret = rte_eth_rx_queue_setup(portid, i, rx_slots, conf->socket_id, conf->rx_conf, conf->pool); if (ret < 0) { RTE_LOG(ERR, USER1, "Couldn't configure RX queue %"PRIu16" of " "port %"PRIu8"\n", i, portid); return (ret); } } /* copy config to the private storage. */ ports[portid].eth_conf = conf->eth_conf[0]; ports[portid].rx_conf = conf->rx_conf[0]; ports[portid].tx_conf = conf->tx_conf[0]; ports[portid].pool = conf->pool; ports[portid].socket_id = conf->socket_id; ports[portid].nr_tx_rings = conf->nr_tx_rings; ports[portid].nr_rx_rings = conf->nr_rx_rings; ports[portid].nr_tx_slots = tx_slots; ports[portid].nr_rx_slots = rx_slots; ports[portid].tx_burst = conf->tx_burst; ports[portid].rx_burst = conf->rx_burst; return (0); }
static int
dpdk_main(int port_id, int argc, char *argv[])
{
	struct rte_eth_dev_info dev_info;
	unsigned nb_queues;
	FILE *lfile;
	uint8_t core_id;
	int ret;

	printf("In dpdk_main\n");

	/* Open the log file and point the RTE log stream at it */
	lfile = fopen("./vrouter.log", "w");
	rte_openlog_stream(lfile);

	ret = rte_eal_init(argc, argv);
	if (ret < 0) {
		log_crit("Invalid EAL parameters\n");
		return -1;
	}

	log_info("Programming cmd rings now!\n");

	/* one event fd per lcore (was sizeof(int *), which over-allocated) */
	rx_event_fd = (int *) malloc(sizeof(int) * rte_lcore_count());
	if (!rx_event_fd) {
		log_crit("Failed to allocate memory for rx event fd arrays\n");
		return -ENOMEM;
	}

	rte_eth_macaddr_get(port_id, &port_eth_addr);
	log_info("Port%d: MAC Address: ", port_id);
	print_ethaddr(&port_eth_addr);

	/* Determine the number of RX/TX pairs supported by the NIC */
	rte_eth_dev_info_get(port_id, &dev_info);

	dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
	dev_info.pci_dev->intr_handle.max_intr =
		dev_info.max_rx_queues + dev_info.max_tx_queues;
	ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle,
		dev_info.max_rx_queues);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n");

	ret = rte_intr_enable(&dev_info.pci_dev->intr_handle);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n");

	ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues,
		dev_info.max_tx_queues, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n");

	/* For each RX/TX pair */
	nb_queues = dev_info.max_tx_queues;
	for (core_id = 0; core_id < nb_queues; core_id++) {
		char s[64];

		if (rte_lcore_is_enabled(core_id) == 0)
			continue;

		/* NUMA socket number */
		unsigned socketid = rte_lcore_to_socket_id(core_id);
		if (socketid >= NB_SOCKETS) {
			log_crit("Socket %d of lcore %u is out of range %d\n",
				socketid, core_id, NB_SOCKETS);
			return -EBADF;
		}

		/* Create a per-socket memory pool on first use */
		if (pktmbuf_pool[socketid] == NULL) {
			log_info("Creating mempool on %d of ~%lx bytes\n",
				socketid, NB_MBUF * MBUF_SIZE);
			printf("Creating mempool on %d of ~%lx bytes\n",
				socketid, NB_MBUF * MBUF_SIZE);
			snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, NB_MBUF, MBUF_SIZE,
					MEMPOOL_CACHE_SIZE, PKTMBUF_PRIV_SZ,
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, 0);
			if (!pktmbuf_pool[socketid]) {
				log_crit("Cannot init mbuf pool on socket %d\n",
					socketid);
				return -ENOMEM;
			}
		}

		/* Setup the TX queue */
		ret = rte_eth_tx_queue_setup(port_id, core_id,
			RTE_TX_DESC_DEFAULT, socketid, &tx_conf);
		if (ret < 0) {
			log_crit("Cannot initialize TX queue (%d)\n", core_id);
			return -ENODEV;
		}

		/* Setup the RX queue */
		ret = rte_eth_rx_queue_setup(port_id, core_id,
			RTE_RX_DESC_DEFAULT, socketid, &rx_conf,
			pktmbuf_pool[socketid]);
		if (ret < 0) {
			log_crit("Cannot initialize RX queue (%d)\n", core_id);
			return -ENODEV;
		}

		/* Create the event fds for event notification */
		lcore_cmd_event_fd[core_id] = eventfd(0, 0);
	}

	/* Start the eth device (error log previously reported core_id) */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		log_crit("rte_eth_dev_start: err=%d, port=%d\n", ret, port_id);
		return -ENODEV;
	}

	/* Put the device in promiscuous mode */
	rte_eth_promiscuous_enable(port_id);

	/* Wait for link up */
	//check_all_ports_link_status(1, 1u << port_id);

	log_info("Starting engines on every core\n");
	rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER);

	return 0;
}
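dpdk_main() creates one eventfd per enabled lcore (lcore_cmd_event_fd[]) but this snippet does not show how they are consumed. A plausible sketch of the handshake, using only standard eventfd(2) semantics; notify_core() and wait_for_cmd() are hypothetical names:

#include <stdint.h>
#include <unistd.h>
#include <sys/eventfd.h>

/* Control side: bump the eventfd counter to wake a worker. */
static void
notify_core(int efd)
{
	uint64_t one = 1;

	(void) write(efd, &one, sizeof(one));
}

/* Worker side: block until notified; read() drains and returns the
 * accumulated counter value. */
static uint64_t
wait_for_cmd(int efd)
{
	uint64_t cnt = 0;

	(void) read(efd, &cnt, sizeof(cnt));
	return cnt;
}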
lagopus_result_t
dpdk_configure_interface(struct interface *ifp)
{
	unsigned socket;
	uint32_t lcore;
	uint8_t queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;
	uint8_t portid;
	struct rte_mempool *pool;

	portid = ifp->info.eth.port_number;
	n_rx_queues = app_get_nic_rx_queues_per_port(portid);
	n_tx_queues = app.nic_tx_port_mask[portid];

	if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
		return LAGOPUS_RESULT_INVALID_ARGS;
	}

	if (ifp->info.eth_dpdk_phy.mtu < 64 ||
	    ifp->info.eth_dpdk_phy.mtu > MAX_PACKET_SZ) {
		return LAGOPUS_RESULT_OUT_OF_RANGE;
	}

	rte_eth_dev_info_get(portid, &ifp->devinfo);

	/* Init port */
	printf("Initializing NIC port %u ...\n", (unsigned) portid);
	ret = rte_eth_dev_configure(portid,
	                            (uint8_t) n_rx_queues,
	                            (uint8_t) n_tx_queues,
	                            &port_conf);
	if (ret < 0) {
		rte_panic("Cannot init NIC port %u (%s)\n",
		          (unsigned) portid, strerror(-ret));
	}
	ret = rte_eth_dev_set_mtu(portid, ifp->info.eth_dpdk_phy.mtu);
	if (ret < 0) {
		rte_panic("Cannot set MTU(%d) for port %d (%d)\n",
		          ifp->info.eth_dpdk_phy.mtu, portid, ret);
	}
	rte_eth_promiscuous_enable(portid);

	/* Init RX queues */
	for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue++) {
		struct app_lcore_params_io *lp;
		uint8_t i;

		if (app.nic_rx_queue_mask[portid][queue] ==
		    NIC_RX_QUEUE_UNCONFIGURED) {
			continue;
		}
		app_get_lcore_for_nic_rx(portid, queue, &lcore);
		lp = &app.lcore_params[lcore].io;
		socket = rte_lcore_to_socket_id(lcore);
		pool = app.lcore_params[lcore].pool;

		printf("Initializing NIC port %u RX queue %u ...\n",
		       (unsigned) portid, (unsigned) queue);
		ret = rte_eth_rx_queue_setup(portid, queue,
		                             (uint16_t) app.nic_rx_ring_size,
		                             socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
		                             &ifp->devinfo.default_rxconf,
#else
		                             &rx_conf,
#endif /* RTE_VERSION_NUM */
		                             pool);
		if (ret < 0) {
			rte_panic("Cannot init RX queue %u for port %u (%d)\n",
			          (unsigned) queue, (unsigned) portid, ret);
		}
		for (i = 0; i < lp->rx.n_nic_queues; i++) {
			if (lp->rx.nic_queues[i].port != portid ||
			    lp->rx.nic_queues[i].queue != queue) {
				continue;
			}
			lp->rx.nic_queues[i].enabled = true;
			break;
		}
	}

	/* Init TX queues */
	if (app.nic_tx_port_mask[portid] == 1) {
		app_get_lcore_for_nic_tx(portid, &lcore);
		socket = rte_lcore_to_socket_id(lcore);
		printf("Initializing NIC port %u TX queue 0 ...\n",
		       (unsigned) portid);
		ret = rte_eth_tx_queue_setup(portid, 0,
		                             (uint16_t) app.nic_tx_ring_size,
		                             socket,
#if defined(RTE_VERSION_NUM) && RTE_VERSION >= RTE_VERSION_NUM(1, 8, 0, 0)
		                             &ifp->devinfo.default_txconf
#else
		                             &tx_conf
#endif /* RTE_VERSION_NUM */
		                            );
		if (ret < 0) {
			rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
			          portid, ret);
		}
	}

	ifp->stats = port_stats;
	dpdk_interface_set_index(ifp);

	return LAGOPUS_RESULT_OK;
}
static inline int
port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
		struct rte_mempool *mp)
{
	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	/* First pass: configure with zero queues so the device is in a
	 * configured state, then query its limits and clamp the queue
	 * counts before the real configuration below. The original code
	 * discarded this return value; check it. */
	retval = rte_eth_dev_configure(port, 0, 0, port_conf);
	if (retval != 0)
		return retval;

	rte_eth_dev_info_get(port, &dev_info);

	default_params.rx_rings = RTE_MIN(dev_info.max_rx_queues,
		MAX_NUM_RX_QUEUE);
	default_params.tx_rings = 1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, default_params.rx_rings,
		default_params.tx_rings, port_conf);
	if (retval != 0)
		return retval;

	for (q = 0; q < default_params.rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
			rte_eth_dev_socket_id(port), NULL, mp);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < default_params.tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
			rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	struct ether_addr addr;
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
		" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
		(unsigned int)port,
		addr.addr_bytes[0], addr.addr_bytes[1],
		addr.addr_bytes[2], addr.addr_bytes[3],
		addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
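A hypothetical caller of port_init_common(), looping over all detected ports with a shared mempool; port_conf_default and pool are assumed to exist in the surrounding application:

	uint8_t p;

	for (p = 0; p < rte_eth_dev_count(); p++)
		if (port_init_common(p, &port_conf_default, pool) != 0)
			rte_exit(EXIT_FAILURE, "port %u init failed\n", p);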
static int
app_init_port(uint8_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;

	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
		ETH_TXQ_FLAGS_NOOFFLOADS;

	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %hu... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"Cannot configure device: err=%d, port=%hu\n",
			ret, portid);

	/* init one RX queue (error message previously named the TX call) */
	fflush(stdout);
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_rx_queue_setup: err=%d, port=%hu\n",
			ret, portid);

	/* init one TX queue */
	fflush(stdout);
	ret = rte_eth_tx_queue_setup(portid, 0, (uint16_t)ring_conf.tx_size,
		rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_tx_queue_setup: err=%d, port=%hu queue=%d\n",
			ret, portid, 0);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			"rte_eth_dev_start: err=%d, port=%hu\n",
			ret, portid);

	printf("done: ");

	/* get link status */
	rte_eth_link_get(portid, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
			(uint32_t) link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			"full-duplex" : "half-duplex");
	} else {
		printf(" Link Down\n");
	}

	rte_eth_promiscuous_enable(portid);

	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}
int
VIFHYPER_CREATE(const char *devstr, struct virtif_sc *vif_sc, uint8_t *enaddr,
	struct virtif_user **viup)
{
	struct rte_eth_conf portconf;
	struct rte_eth_link link;
	struct ether_addr ea;
	struct virtif_user *viu;
	unsigned long tmp;
	char *ep;
	int rv = EINVAL; /* XXX: not very accurate ;) */

	viu = malloc(sizeof(*viu));
	memset(viu, 0, sizeof(*viu));
	viu->viu_devstr = strdup(devstr);
	viu->viu_virtifsc = vif_sc;

	tmp = strtoul(devstr, &ep, 10);
	if (*ep != '\0')
		OUT("invalid dev string");
	if (tmp > 255)
		OUT("DPDK port id out of range");
	viu->viu_port_id = tmp;

	if (viu->viu_port_id >= rte_eth_dev_count()) {
		rv = -ENODEV;
		OUT("DPDK port not initialized");
	}

	memset(&portconf, 0, sizeof(portconf));
	if ((rv = rte_eth_dev_configure(viu->viu_port_id,
	    NQUEUE, NQUEUE, &portconf)) < 0)
		OUT("configure device");

	if ((rv = rte_eth_rx_queue_setup(viu->viu_port_id,
	    0, NDESCRX, 0, &rxconf, mbpool_rx)) < 0)
		OUT("rx queue setup");

	if ((rv = rte_eth_tx_queue_setup(viu->viu_port_id,
	    0, NDESCTX, 0, &txconf)) < 0)
		OUT("tx queue setup");

	if ((rv = rte_eth_dev_start(viu->viu_port_id)) < 0)
		OUT("device start");

	rte_eth_link_get(viu->viu_port_id, &link);
	if (!link.link_status) {
		ifwarn(viu, "link down");
	}

	rte_eth_promiscuous_enable(viu->viu_port_id);
	rte_eth_macaddr_get(viu->viu_port_id, &ea);
	memcpy(enaddr, ea.addr_bytes, ETHER_ADDR_LEN);

	rv = pthread_create(&viu->viu_rcvpt, NULL, receiver, viu);

 out:
	/* XXX: well this isn't much of an unrolling ... */
	if (rv != 0)
		free(viu);
	else
		*viup = viu;

	return rumpuser_component_errtrans(-rv);
}
int
main(int argc, char **argv)
{
	struct lcore_env **envs;
	int ret;
	uint8_t n_ports;
	unsigned lcore_count;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
	argc -= ret;
	argv += ret;

	ret = l2sw_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid MARIO arguments\n");

	lcore_count = rte_lcore_count();

	mbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE, 32,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(), 0);
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");

	/* init route_table */
	route_table = create_route_table(ROUTE_ENTRY_SIZE);
	add_staticroute(route_table);

	/* init arp_table */
	arp_table = create_arp_table(ARP_ENTRY_SIZE);

	n_ports = rte_eth_dev_count();
	if (n_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
	if (n_ports > RTE_MAX_ETHPORTS)
		n_ports = RTE_MAX_ETHPORTS;

	/* Initialize lcore_env: each logical core gets a dedicated TX queue
	 * on each port, so each env carries one mbuf_table per port.
	 * (The allocation previously sized only a single pointer.) */
	envs = (struct lcore_env **)
		rte_malloc(NULL, sizeof(struct lcore_env *) * lcore_count, 0);
	if (envs == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate memory for core envs\n");

	uint8_t lcore_id;
	for (lcore_id = 0; lcore_id < lcore_count; lcore_id++) {
		struct lcore_env *env;

		env = (struct lcore_env *)
			rte_malloc(NULL, sizeof(struct lcore_env) +
				sizeof(struct mbuf_table) * n_ports, 0);
		if (env == NULL)
			rte_exit(EXIT_FAILURE,
				"Cannot allocate memory for %u core env\n",
				lcore_id);
		env->n_port = n_ports;
		env->lcore_id = lcore_id;
		memset(env->tx_mbufs, 0, sizeof(struct mbuf_table) * n_ports);
		envs[lcore_id] = env;
	}

	/* Initialise each port */
	uint8_t port_id;
	for (port_id = 0; port_id < n_ports; port_id++) {
		fflush(stdout);
		ret = rte_eth_dev_configure(port_id, lcore_count, lcore_count,
			&port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot configure device: err=%d, port=%u\n",
				ret, (unsigned) port_id);

		rte_eth_macaddr_get(port_id, &port2eth[port_id]);

		/* init one RX queue per lcore */
		uint8_t core_id;
		for (core_id = 0; core_id < lcore_count; core_id++) {
			ret = rte_eth_rx_queue_setup(port_id, core_id, nb_rxd,
				rte_eth_dev_socket_id(port_id), NULL,
				mbuf_pool);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_rx_queue_setup: err=%d, port=%u queue=%u\n",
					ret, (unsigned) port_id,
					(unsigned) core_id);
		}

		/* init one TX queue per lcore */
		for (core_id = 0; core_id < lcore_count; core_id++) {
			ret = rte_eth_tx_queue_setup(port_id, core_id, nb_txd,
				rte_eth_dev_socket_id(port_id), NULL);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					"rte_eth_tx_queue_setup: err=%d, port=%u queue=%u\n",
					ret, (unsigned) port_id,
					(unsigned) core_id);
		}

		/* Start device */
		ret = rte_eth_dev_start(port_id);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start: err=%d, port=%u\n",
				ret, (unsigned) port_id);

		rte_eth_promiscuous_enable(port_id);

		memset(&port_statistics, 0, sizeof(port_statistics));
	}

	check_all_ports_link_status(n_ports);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l2sw_launch_one_lcore, envs, CALL_MASTER);
	{
		uint8_t lcore_id;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {
			if (rte_eal_wait_lcore(lcore_id) < 0)
				return -1;
		}
	}

	rte_free(arp_table);
	rte_free(route_table);

	return 0;
}
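l2sw_launch_one_lcore() itself is not part of this snippet; the sketch below only shows the shape such an entry point usually takes when handed the envs array. It is a guess at the structure, not the original implementation, and force_quit stands in for whatever exit flag the real application uses.

/* Hypothetical per-lcore entry point for rte_eal_mp_remote_launch(). */
static int
l2sw_launch_one_lcore(void *arg)
{
	struct lcore_env **envs = arg;
	struct lcore_env *env = envs[rte_lcore_id()];

	while (!force_quit) {	/* force_quit: assumed global exit flag */
		/* poll this lcore's RX queue on each of env->n_port ports,
		 * learn/forward, and flush the per-port TX buffers held in
		 * env->tx_mbufs */
	}
	return 0;
}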
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
	const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT,
		tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
	int retval;
	uint16_t q;

	/* The max pool number from dev_info is used to validate the pool
	 * number specified on the command line. */
	rte_eth_dev_info_get(port, &dev_info);

	/* Configure the number of supported virtio devices based on
	 * VMDQ limits. */
	num_devices = dev_info.max_vmdq_pools;
	num_queues = dev_info.max_rx_queues;

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	if (port >= rte_eth_dev_count())
		return -1;

	rx_rings = (uint16_t)num_queues;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
			rte_eth_dev_socket_id(port), &rx_conf_default,
			mbuf_pool);
		if (retval < 0)
			return retval;
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
			rte_eth_dev_socket_id(port), &tx_conf_default);
		if (retval < 0)
			return retval;
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n",
		num_devices);
	RTE_LOG(INFO, VHOST_PORT,
		"Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
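get_eth_conf() is referenced above but not shown. Judging by the stock DPDK vmdq example it most likely builds a VMDQ-only RX configuration along the lines of the sketch below; the helper name and the VLAN ids are placeholders, not values from this application.

#include <string.h>
#include <rte_ethdev.h>

/* Hedged sketch: VMDQ-only RX config mapping one VLAN id per pool. */
static void
sketch_vmdq_conf(struct rte_eth_conf *conf, uint32_t pools)
{
	struct rte_eth_vmdq_rx_conf *v;
	uint32_t i;

	memset(conf, 0, sizeof(*conf));
	conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;

	v = &conf->rx_adv_conf.vmdq_rx_conf;
	v->nb_queue_pools = (enum rte_eth_nb_pools)pools;
	v->enable_default_pool = 0;
	v->default_pool = 0;
	v->nb_pool_maps = (uint8_t)pools;
	for (i = 0; i < pools; i++) {
		v->pool_map[i].vlan_id = (uint16_t)(1000 + i); /* assumed ids */
		v->pool_map[i].pools = 1ULL << i;
	}
}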