/*
 * Calculate the driver's optimal polling interval for a port.
 * Parameters: port_num - DPDK port number
 * Returns:    maximum poll interval in microseconds
 */
int get_max_drv_poll_interval_in_micros(int port_num)
{
	struct rte_eth_link rte_eth_link;
	float bytes_in_sec, bursts_in_sec, bytes_in_burst;

	rte_eth_link_get(port_num, &rte_eth_link);
	switch (rte_eth_link.link_speed) {
	case ETH_LINK_SPEED_10:
		bytes_in_sec = 10.0f / 8;
		break;
	case ETH_LINK_SPEED_100:
		bytes_in_sec = 100.0f / 8;
		break;
	case ETH_LINK_SPEED_1000:
		bytes_in_sec = 1000.0f / 8;
		break;
	case ETH_LINK_SPEED_10000:
		bytes_in_sec = 10000.0f / 8;
		break;
	default:
		bytes_in_sec = 10000.0f / 8;
	}
	if (rte_eth_link.link_duplex == ETH_LINK_HALF_DUPLEX)
		bytes_in_sec /= 2;
	bytes_in_sec *= 1024 * 1024; /* Mbit -> bytes (x1M) */

	/* MTU * BURST_SIZE */
	bytes_in_burst = 1448 * MAX_PKT_BURST;
	bursts_in_sec = bytes_in_sec / bytes_in_burst;
	/* microseconds per second divided by bursts per second
	 * = max poll interval in microseconds */
	return (int)(1000000 / bursts_in_sec) / 2 /* safe side */;
	/* cast to int; the value is not large */
}
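/*
 * A minimal sketch (not part of the original driver) of how the computed
 * interval might be used: sleep between empty polls so an idle port is not
 * busy-polled. rx_loop(), the queue id 0 and the MAX_PKT_BURST value are
 * illustrative assumptions only.
 */
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>

#ifndef MAX_PKT_BURST
#define MAX_PKT_BURST 32 /* assumed burst size, matching the formula above */
#endif

static void rx_loop(int port_num)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	int poll_us = get_max_drv_poll_interval_in_micros(port_num);

	for (;;) {
		uint16_t i, nb = rte_eth_rx_burst(port_num, 0, pkts, MAX_PKT_BURST);

		if (nb == 0) {
			rte_delay_us(poll_us);	/* back off while the queue is empty */
			continue;
		}
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]);	/* placeholder for real processing */
	}
}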
/**
 * Initialise an individual port:
 * - configure number of rx and tx rings
 * - set up each rx ring, to pull from the main mbuf pool
 * - set up each tx ring
 * - start the port and report its status to stdout
 */
static int
init_port(uint8_t port_num)
{
	/* port configuration: all features off by default, except RSS on rx */
	const struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_RSS
		}
	};
	const uint16_t rx_rings = 1, tx_rings = num_clients;
	const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
	const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
	struct rte_eth_link link;
	uint16_t q;
	int retval;

	printf("Port %u init ... ", (unsigned)port_num);
	fflush(stdout);

	/* Standard DPDK port initialisation - config port, then set up
	 * rx and tx rings */
	if ((retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings,
	    &port_conf)) != 0)
		return retval;

	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
				SOCKET0, &rx_conf_default, pktmbuf_pool);
		if (retval < 0)
			return retval;
	}

	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
				SOCKET0, &tx_conf_default);
		if (retval < 0)
			return retval;
	}

	rte_eth_promiscuous_enable(port_num);

	retval = rte_eth_dev_start(port_num);
	if (retval < 0)
		return retval;

	printf("done: ");

	/* get link status */
	rte_eth_link_get(port_num, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
		       (uint32_t)link.link_speed,
		       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
		       "full-duplex" : "half-duplex");
	} else {
		printf(" Link Down\n");
	}
	return 0;
}
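/*
 * The ring setup above pins every queue to SOCKET0. On multi-socket machines
 * a common refinement is to place each queue on the NUMA node reported for
 * the port. A minimal sketch, assuming the same rx_conf_default,
 * tx_conf_default and pktmbuf_pool globals as init_port() above:
 */
static int setup_queues_numa(uint8_t port_num, uint16_t rx_ring_size,
		uint16_t tx_ring_size)
{
	int socket = rte_eth_dev_socket_id(port_num);
	int retval;

	if (socket == SOCKET_ID_ANY)	/* e.g. virtual devices */
		socket = rte_socket_id();

	retval = rte_eth_rx_queue_setup(port_num, 0, rx_ring_size, socket,
			&rx_conf_default, pktmbuf_pool);
	if (retval < 0)
		return retval;
	return rte_eth_tx_queue_setup(port_num, 0, tx_ring_size, socket,
			&tx_conf_default);
}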
int VIFHYPER_CREATE(const char *devstr, struct virtif_sc *vif_sc, uint8_t *enaddr, struct virtif_user **viup) { struct rte_eth_conf portconf; struct rte_eth_link link; struct ether_addr ea; struct virtif_user *viu; int rv = EINVAL; /* XXX: not very accurate ;) */ viu = malloc(sizeof(*viu)); memset(viu, 0, sizeof(*viu)); viu->viu_devstr = strdup(devstr); viu->viu_virtifsc = vif_sc; /* this is here only for simplicity */ if ((rv = globalinit(viu)) != 0) goto out; memset(&portconf, 0, sizeof(portconf)); if ((rv = rte_eth_dev_configure(IF_PORTID, NQUEUE, NQUEUE, &portconf)) < 0) OUT("configure device"); if ((rv = rte_eth_rx_queue_setup(IF_PORTID, 0, NDESCRX, 0, &rxconf, mbpool_rx)) <0) OUT("rx queue setup"); if ((rv = rte_eth_tx_queue_setup(IF_PORTID, 0, NDESCTX, 0, &txconf)) < 0) OUT("tx queue setup"); if ((rv = rte_eth_dev_start(IF_PORTID)) < 0) OUT("device start"); rte_eth_link_get(IF_PORTID, &link); if (!link.link_status) { ifwarn(viu, "link down"); } rte_eth_promiscuous_enable(IF_PORTID); rte_eth_macaddr_get(IF_PORTID, &ea); memcpy(enaddr, ea.addr_bytes, ETHER_ADDR_LEN); rv = pthread_create(&viu->viu_rcvpt, NULL, receiver, viu); out: /* XXX: well this isn't much of an unrolling ... */ if (rv != 0) free(viu); else *viup = viu; return rumpuser_component_errtrans(-rv); }
void rw_piot_get_link_info(rw_piot_api_handle_t api_handle,
			   rw_piot_link_info_t *eth_link_info,
			   int wait)
{
	rw_piot_device_t *rw_piot_dev = RWPIOT_GET_DEVICE(api_handle);

	ASSERT(RWPIOT_VALID_DEVICE(rw_piot_dev));
	if (NULL == rw_piot_dev || NULL == eth_link_info) {
		RW_PIOT_LOG(RTE_LOG_ERR,
			    "PIOT Could not find device by handle or invalid input param\n");
		return;
	}

	if (wait) {
		rte_eth_link_get(rw_piot_dev->rte_port_id, eth_link_info);
	} else {
		rte_eth_link_get_nowait(rw_piot_dev->rte_port_id, eth_link_info);
	}
	return;
}
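/*
 * rte_eth_link_get() may block for several seconds while the PMD waits for
 * auto-negotiation to complete, whereas rte_eth_link_get_nowait() returns
 * the current state immediately. A bounded wait-for-link-up helper in the
 * style of the DPDK examples (a sketch; the 90 x 100 ms budget is an
 * assumption) could look like this:
 */
#include <string.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>

static int wait_for_link_up(uint8_t port_id)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < 90; i++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status)
			return 1;	/* link is up */
		rte_delay_ms(100);
	}
	return 0;	/* still down after ~9 s */
}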
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	int err;

	rte_eth_link_get((uint8_t)portid, &link);

	port_params.socket = socketid;
	port_params.rate = (uint64_t)link.link_speed * 1000 * 1000 / 8;
	rte_snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
	if (port == NULL) {
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	}

	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport,
					       &subport_params[subport]);
		if (err) {
			rte_exit(EXIT_FAILURE,
				 "Unable to config sched subport %u, err=%d\n",
				 subport, err);
		}

		for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
			if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
				if (err) {
					rte_exit(EXIT_FAILURE,
						 "Unable to config sched pipe %u "
						 "for profile %d, err=%d\n",
						 pipe,
						 app_pipe_to_profile[subport][pipe],
						 err);
				}
			}
		}
	}

	return port;
}
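/*
 * link_speed is reported by rte_eth_link_get() in Mbps, so the byte rate fed
 * to rte_sched is speed * 1e6 / 8; a 10000 Mbps link, for example, yields a
 * rate of 1 250 000 000 bytes/s. A small helper capturing just that
 * conversion (a sketch, not part of the original application):
 */
#include <stdint.h>

static inline uint64_t link_speed_to_byte_rate(uint32_t link_speed_mbps)
{
	return (uint64_t)link_speed_mbps * 1000 * 1000 / 8;
}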
int
rumpcomp_virtif_create(int devnum, struct virtif_user **viup)
{
	struct rte_eth_conf portconf;
	struct rte_eth_link link;
	int rv = EINVAL; /* XXX: not very accurate ;) */

	/* this is here only for simplicity */
	if (globalinit() != 0)
		goto out;

	memset(&portconf, 0, sizeof(portconf));
	if (rte_eth_dev_configure(IF_PORTID, NQUEUE, NQUEUE, &portconf) < 0)
		OUT("configure device\n");
	if (rte_eth_rx_queue_setup(IF_PORTID, 0, NDESC, 0, &rxconf, mbpool) < 0)
		OUT("rx queue setup\n");
	if (rte_eth_tx_queue_setup(IF_PORTID, 0, NDESC, 0, &txconf) < 0)
		OUT("tx queue setup\n");
	if (rte_eth_dev_start(IF_PORTID) < 0)
		OUT("device start\n");

	rte_eth_link_get(IF_PORTID, &link);
	if (!link.link_status) {
		printf("warning: virt link down\n");
	}
	rte_eth_promiscuous_enable(IF_PORTID);

	rv = 0;

 out:
	*viup = NULL; /* not used by the driver in its current state */
	return rv;
}
int32_t interfaceSetup(void)
{
	uint8_t portIndex = 0, portCount = rte_eth_dev_count();
	int32_t ret = 0, socket_id = -1;
	struct rte_eth_link link;

	for (portIndex = 0; portIndex < portCount; portIndex++) {
		/* fetch the socket id to which the port is mapped */
		for (ret = 0; ret < MAX_NUMANODE; ret++) {
			if (numaNodeInfo[ret].intfTotal) {
				if (numaNodeInfo[ret].intfAvail & (1 << portIndex)) {
					socket_id = ret;
					break;
				}
			}
		}

		memset(&link, 0x00, sizeof(struct rte_eth_link));

		ret = rte_eth_dev_configure(portIndex, 1, 1, &portConf);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Dev Configure\n");
			return -1;
		}

		/* use the port's NUMA node for queue memory as well as for
		 * mempool selection */
		ret = rte_eth_rx_queue_setup(portIndex, 0, RTE_TEST_RX_DESC_DEFAULT,
					     socket_id, NULL,
					     numaNodeInfo[socket_id].rx[0]);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Rx Queue Setup\n");
			return -2;
		}

		ret = rte_eth_tx_queue_setup(portIndex, 0, RTE_TEST_TX_DESC_DEFAULT,
					     socket_id, NULL);
		if (unlikely(ret < 0)) {
			rte_panic("ERROR: Tx Queue Setup\n");
			return -3;
		}

		rte_eth_link_get(portIndex, &link);
		if (unlikely(link.link_duplex != ETH_LINK_FULL_DUPLEX)) {
			printf(" port:%u; duplex:%s, status:%s\n",
			       (unsigned)portIndex,
			       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? "Full" : "Half",
			       (link.link_status == 1) ? "up" : "down");
			/* return -1; note: the interface may simply be
			 * disconnected, or the speed may not match */
		}

		rte_eth_promiscuous_enable(portIndex);
		rte_eth_dev_start(portIndex);
	}

	return 0;
}
int VIFHYPER_CREATE(const char *devstr, struct virtif_sc *vif_sc, uint8_t *enaddr, struct virtif_user **viup) { struct rte_eth_conf portconf; struct rte_eth_link link; struct ether_addr ea; struct virtif_user *viu; unsigned long tmp; char *ep; int rv = EINVAL; /* XXX: not very accurate ;) */ viu = malloc(sizeof(*viu)); memset(viu, 0, sizeof(*viu)); viu->viu_devstr = strdup(devstr); viu->viu_virtifsc = vif_sc; tmp = strtoul(devstr, &ep, 10); if (*ep != '\0') OUT("invalid dev string"); if (tmp > 255) OUT("DPDK port id out of range"); viu->viu_port_id = tmp; if (viu->viu_port_id >= rte_eth_dev_count()) { rv = -ENODEV; OUT("DPDK port not initialized"); } memset(&portconf, 0, sizeof(portconf)); if ((rv = rte_eth_dev_configure(viu->viu_port_id, NQUEUE, NQUEUE, &portconf)) < 0) OUT("configure device"); if ((rv = rte_eth_rx_queue_setup(viu->viu_port_id, 0, NDESCRX, 0, &rxconf, mbpool_rx)) <0) OUT("rx queue setup"); if ((rv = rte_eth_tx_queue_setup(viu->viu_port_id, 0, NDESCTX, 0, &txconf)) < 0) OUT("tx queue setup"); if ((rv = rte_eth_dev_start(viu->viu_port_id)) < 0) OUT("device start"); rte_eth_link_get(viu->viu_port_id, &link); if (!link.link_status) { ifwarn(viu, "link down"); } rte_eth_promiscuous_enable(viu->viu_port_id); rte_eth_macaddr_get(viu->viu_port_id, &ea); memcpy(enaddr, ea.addr_bytes, ETHER_ADDR_LEN); rv = pthread_create(&viu->viu_rcvpt, NULL, receiver, viu); out: /* XXX: well this isn't much of an unrolling ... */ if (rv != 0) free(viu); else *viup = viu; return rumpuser_component_errtrans(-rv); }
static int
app_init_port(uint8_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;

	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;

	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %hu... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "Cannot configure device: err=%d, port=%hu\n",
			 ret, portid);

	/* init one RX queue */
	fflush(stdout);
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
				     rte_eth_dev_socket_id(portid), &rx_conf,
				     mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_rx_queue_setup: err=%d, port=%hu\n",
			 ret, portid);

	/* init one TX queue */
	fflush(stdout);
	ret = rte_eth_tx_queue_setup(portid, 0, (uint16_t)ring_conf.tx_size,
				     rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_tx_queue_setup: err=%d, port=%hu queue=%d\n",
			 ret, portid, 0);

	/* Start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE,
			 "rte_eth_dev_start: err=%d, port=%hu\n",
			 ret, portid);

	printf("done: ");

	/* get link status */
	rte_eth_link_get(portid, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
		       (uint32_t)link.link_speed,
		       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
		       "full-duplex" : "half-duplex");
	} else {
		printf(" Link Down\n");
	}

	rte_eth_promiscuous_enable(portid);

	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}
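/*
 * On DPDK releases that populate them, the PMD's preferred thresholds can be
 * read from rte_eth_dev_info instead of being hard-coded as above. A sketch
 * of that alternative (get_default_queue_conf() is a hypothetical helper):
 */
static void get_default_queue_conf(uint8_t portid,
		struct rte_eth_rxconf *rx_conf,
		struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);
	*rx_conf = dev_info.default_rxconf;	/* driver-recommended RX settings */
	*tx_conf = dev_info.default_txconf;	/* driver-recommended TX settings */
}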
static inline int app_link_filter_arp_add(struct app_link_params *link) { struct rte_eth_ethertype_filter filter = { .ether_type = ETHER_TYPE_ARP, .flags = 0, .queue = link->arp_q, }; return rte_eth_dev_filter_ctrl(link->pmd_id, RTE_ETH_FILTER_ETHERTYPE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_tcp_syn_add(struct app_link_params *link) { struct rte_eth_syn_filter filter = { .hig_pri = 1, .queue = link->tcp_syn_q, }; return rte_eth_dev_filter_ctrl(link->pmd_id, RTE_ETH_FILTER_SYN, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = 0, .proto_mask = 0, /* Disable */ .tcp_flags = 0, .priority = 1, /* Lowest */ .queue = l1->ip_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = 0, .proto_mask = 0, /* Disable */ .tcp_flags = 0, .priority = 1, /* Lowest */ .queue = l1->ip_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_TCP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->tcp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_TCP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->tcp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_UDP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->udp_local_q, }; return 
rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_UDP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->udp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static inline int app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_SCTP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->sctp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } static inline int app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2) { struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = rte_bswap32(l2->ip), .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = IPPROTO_SCTP, .proto_mask = UINT8_MAX, /* Enable */ .tcp_flags = 0, .priority = 2, /* Higher priority than IP */ .queue = l1->sctp_local_q, }; return rte_eth_dev_filter_ctrl(l1->pmd_id, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_DELETE, &filter); } static void app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp) { if (cp->arp_q != 0) { int status = app_link_filter_arp_add(cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding ARP filter (queue = %" PRIu32 ")", cp->name, cp->pmd_id, cp->arp_q); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding ARP filter " "(queue = %" PRIu32 ") (%" PRId32 ")\n", cp->name, cp->pmd_id, cp->arp_q, status); } } static void app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp) { if (cp->tcp_syn_q != 0) { int status = app_link_filter_tcp_syn_add(cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding TCP SYN filter (queue = %" PRIu32 ")", cp->name, cp->pmd_id, cp->tcp_syn_q); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding TCP SYN filter " "(queue = %" PRIu32 ") (%" PRId32 ")\n", cp->name, cp->pmd_id, cp->tcp_syn_q, status); } } void app_link_up_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; int status; /* For each link, add filters for IP of current link */ if (cp->ip != 0) { for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; /* IP */ if (p->ip_local_q != 0) { int status = app_link_filter_ip_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding IP filter (queue= %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->ip_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding IP " "filter (queue= %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->ip_local_q, cp->ip, status); } /* 
TCP */ if (p->tcp_local_q != 0) { int status = app_link_filter_tcp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding TCP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->tcp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding TCP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->tcp_local_q, cp->ip, status); } /* UDP */ if (p->udp_local_q != 0) { int status = app_link_filter_udp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): " "Adding UDP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->udp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding UDP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->udp_local_q, cp->ip, status); } /* SCTP */ if (p->sctp_local_q != 0) { int status = app_link_filter_sctp_add(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Adding SCTP filter " "(queue = %" PRIu32 ", IP = 0x%08" PRIx32 ")", p->name, p->pmd_id, p->sctp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): " "Error adding SCTP " "filter (queue = %" PRIu32 ", " "IP = 0x%08" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->sctp_local_q, cp->ip, status); } } } /* PMD link up */ status = rte_eth_dev_set_link_up(cp->pmd_id); /* Do not panic if PMD does not provide link up functionality */ if (status < 0 && status != -ENOTSUP) rte_panic("%s (%" PRIu32 "): PMD set link up error %" PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as UP */ cp->state = 1; } void app_link_down_internal(struct app_params *app, struct app_link_params *cp) { uint32_t i; int status; /* PMD link down */ status = rte_eth_dev_set_link_down(cp->pmd_id); /* Do not panic if PMD does not provide link down functionality */ if (status < 0 && status != -ENOTSUP) rte_panic("%s (%" PRIu32 "): PMD set link down error %" PRId32 "\n", cp->name, cp->pmd_id, status); /* Mark link as DOWN */ cp->state = 0; /* Return if current link IP is not valid */ if (cp->ip == 0) return; /* For each link, remove filters for IP of current link */ for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; /* IP */ if (p->ip_local_q != 0) { int status = app_link_filter_ip_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting IP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->ip_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting IP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->ip_local_q, cp->ip, status); } /* TCP */ if (p->tcp_local_q != 0) { int status = app_link_filter_tcp_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting TCP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->tcp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting TCP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->tcp_local_q, cp->ip, status); } /* UDP */ if (p->udp_local_q != 0) { int status = app_link_filter_udp_del(p, cp); APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting UDP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->udp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting UDP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->udp_local_q, cp->ip, status); } /* SCTP */ if (p->sctp_local_q != 0) { int status = app_link_filter_sctp_del(p, cp); 
APP_LOG(app, LOW, "%s (%" PRIu32 "): Deleting SCTP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")", p->name, p->pmd_id, p->sctp_local_q, cp->ip); if (status) rte_panic("%s (%" PRIu32 "): Error deleting SCTP filter " "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ") (%" PRId32 ")\n", p->name, p->pmd_id, p->sctp_local_q, cp->ip, status); } } } static void app_check_link(struct app_params *app) { uint32_t all_links_up, i; all_links_up = 1; for (i = 0; i < app->n_links; i++) { struct app_link_params *p = &app->link_params[i]; struct rte_eth_link link_params; memset(&link_params, 0, sizeof(link_params)); rte_eth_link_get(p->pmd_id, &link_params); APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s", p->name, p->pmd_id, link_params.link_speed / 1000, link_params.link_status ? "UP" : "DOWN"); if (link_params.link_status == ETH_LINK_DOWN) all_links_up = 0; } if (all_links_up == 0) rte_panic("Some links are DOWN\n"); } static uint32_t is_any_swq_frag_or_ras(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_swq; i++) { struct app_pktq_swq_params *p = &app->swq_params[i]; if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) || (p->ipv4_ras == 1) || (p->ipv6_ras == 1)) return 1; } return 0; } static void app_init_link_frag_ras(struct app_params *app) { uint32_t i; if (is_any_swq_frag_or_ras(app)) { for (i = 0; i < app->n_pktq_hwq_out; i++) { struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i]; p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS; } } } static inline int app_get_cpu_socket_id(uint32_t pmd_id) { int status = rte_eth_dev_socket_id(pmd_id); return (status != SOCKET_ID_ANY) ? status : 0; } static inline int app_link_rss_enabled(struct app_link_params *cp) { return (cp->n_rss_qs) ? 1 : 0; } static void app_link_rss_setup(struct app_link_params *cp) { struct rte_eth_dev_info dev_info; struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX]; uint32_t i; int status; /* Get RETA size */ memset(&dev_info, 0, sizeof(dev_info)); rte_eth_dev_info_get(cp->pmd_id, &dev_info); if (dev_info.reta_size == 0) rte_panic("%s (%u): RSS setup error (null RETA size)\n", cp->name, cp->pmd_id); if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) rte_panic("%s (%u): RSS setup error (RETA size too big)\n", cp->name, cp->pmd_id); /* Setup RETA contents */ memset(reta_conf, 0, sizeof(reta_conf)); for (i = 0; i < dev_info.reta_size; i++) reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; for (i = 0; i < dev_info.reta_size; i++) { uint32_t reta_id = i / RTE_RETA_GROUP_SIZE; uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE; uint32_t rss_qs_pos = i % cp->n_rss_qs; reta_conf[reta_id].reta[reta_pos] = (uint16_t) cp->rss_qs[rss_qs_pos]; } /* RETA update */ status = rte_eth_dev_rss_reta_update(cp->pmd_id, reta_conf, dev_info.reta_size); if (status != 0) rte_panic("%s (%u): RSS setup error (RETA update failed)\n", cp->name, cp->pmd_id); } static void app_init_link_set_config(struct app_link_params *p) { if (p->n_rss_qs) { p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS; p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 | p->rss_proto_ipv6 | p->rss_proto_l2; } } static void app_init_link(struct app_params *app) { uint32_t i; app_init_link_frag_ras(app); for (i = 0; i < app->n_links; i++) { struct app_link_params *p_link = &app->link_params[i]; uint32_t link_id, n_hwq_in, n_hwq_out, j; int status; sscanf(p_link->name, "LINK%" PRIu32, &link_id); n_hwq_in = app_link_get_n_rxq(app, p_link); n_hwq_out = app_link_get_n_txq(app, p_link); app_init_link_set_config(p_link); APP_LOG(app, HIGH, "Initializing %s (%" 
PRIu32") " "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...", p_link->name, p_link->pmd_id, n_hwq_in, n_hwq_out); /* LINK */ status = rte_eth_dev_configure( p_link->pmd_id, n_hwq_in, n_hwq_out, &p_link->conf); if (status < 0) rte_panic("%s (%" PRId32 "): " "init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, status); rte_eth_macaddr_get(p_link->pmd_id, (struct ether_addr *) &p_link->mac_addr); if (p_link->promisc) rte_eth_promiscuous_enable(p_link->pmd_id); /* RXQ */ for (j = 0; j < app->n_pktq_hwq_in; j++) { struct app_pktq_hwq_in_params *p_rxq = &app->hwq_in_params[j]; uint32_t rxq_link_id, rxq_queue_id; uint16_t nb_rxd = p_rxq->size; sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32, &rxq_link_id, &rxq_queue_id); if (rxq_link_id != link_id) continue; status = rte_eth_dev_adjust_nb_rx_tx_desc( p_link->pmd_id, &nb_rxd, NULL); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s adjust number of Rx descriptors " "error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_rxq->name, status); status = rte_eth_rx_queue_setup( p_link->pmd_id, rxq_queue_id, nb_rxd, app_get_cpu_socket_id(p_link->pmd_id), &p_rxq->conf, app->mempool[p_rxq->mempool_id]); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_rxq->name, status); } /* TXQ */ for (j = 0; j < app->n_pktq_hwq_out; j++) { struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[j]; uint32_t txq_link_id, txq_queue_id; uint16_t nb_txd = p_txq->size; sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32, &txq_link_id, &txq_queue_id); if (txq_link_id != link_id) continue; status = rte_eth_dev_adjust_nb_rx_tx_desc( p_link->pmd_id, NULL, &nb_txd); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s adjust number of Tx descriptors " "error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_txq->name, status); status = rte_eth_tx_queue_setup( p_link->pmd_id, txq_queue_id, nb_txd, app_get_cpu_socket_id(p_link->pmd_id), &p_txq->conf); if (status < 0) rte_panic("%s (%" PRIu32 "): " "%s init error (%" PRId32 ")\n", p_link->name, p_link->pmd_id, p_txq->name, status); } /* LINK START */ status = rte_eth_dev_start(p_link->pmd_id); if (status < 0) rte_panic("Cannot start %s (error %" PRId32 ")\n", p_link->name, status); /* LINK FILTERS */ app_link_set_arp_filter(app, p_link); app_link_set_tcp_syn_filter(app, p_link); if (app_link_rss_enabled(p_link)) app_link_rss_setup(p_link); /* LINK UP */ app_link_up_internal(app, p_link); } app_check_link(app); } static void app_init_swq(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_swq; i++) { struct app_pktq_swq_params *p = &app->swq_params[i]; unsigned flags = 0; if (app_swq_get_readers(app, p) == 1) flags |= RING_F_SC_DEQ; if (app_swq_get_writers(app, p) == 1) flags |= RING_F_SP_ENQ; APP_LOG(app, HIGH, "Initializing %s...", p->name); app->swq[i] = rte_ring_create( p->name, p->size, p->cpu_socket_id, flags); if (app->swq[i] == NULL) rte_panic("%s init error\n", p->name); } } static void app_init_tm(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_tm; i++) { struct app_pktq_tm_params *p_tm = &app->tm_params[i]; struct app_link_params *p_link; struct rte_eth_link link_eth_params; struct rte_sched_port *sched; uint32_t n_subports, subport_id; int status; p_link = app_get_link_for_tm(app, p_tm); /* LINK */ rte_eth_link_get(p_link->pmd_id, &link_eth_params); /* TM */ p_tm->sched_port_params.name = p_tm->name; p_tm->sched_port_params.socket = app_get_cpu_socket_id(p_link->pmd_id); p_tm->sched_port_params.rate = (uint64_t) 
link_eth_params.link_speed * 1000 * 1000 / 8; APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name); sched = rte_sched_port_config(&p_tm->sched_port_params); if (sched == NULL) rte_panic("%s init error\n", p_tm->name); app->tm[i] = sched; /* Subport */ n_subports = p_tm->sched_port_params.n_subports_per_port; for (subport_id = 0; subport_id < n_subports; subport_id++) { uint32_t n_pipes_per_subport, pipe_id; status = rte_sched_subport_config(sched, subport_id, &p_tm->sched_subport_params[subport_id]); if (status) rte_panic("%s subport %" PRIu32 " init error (%" PRId32 ")\n", p_tm->name, subport_id, status); /* Pipe */ n_pipes_per_subport = p_tm->sched_port_params.n_pipes_per_subport; for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) { int profile_id = p_tm->sched_pipe_to_profile[ subport_id * APP_MAX_SCHED_PIPES + pipe_id]; if (profile_id == -1) continue; status = rte_sched_pipe_config(sched, subport_id, pipe_id, profile_id); if (status) rte_panic("%s subport %" PRIu32 " pipe %" PRIu32 " (profile %" PRId32 ") " "init error (% " PRId32 ")\n", p_tm->name, subport_id, pipe_id, profile_id, status); } } } } #ifndef RTE_EXEC_ENV_LINUXAPP static void app_init_tap(struct app_params *app) { if (app->n_pktq_tap == 0) return; rte_panic("TAP device not supported.\n"); } #else static void app_init_tap(struct app_params *app) { uint32_t i; for (i = 0; i < app->n_pktq_tap; i++) { struct app_pktq_tap_params *p_tap = &app->tap_params[i]; struct ifreq ifr; int fd, status; APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name); fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK); if (fd < 0) rte_panic("Cannot open file /dev/net/tun\n"); memset(&ifr, 0, sizeof(ifr)); ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name); status = ioctl(fd, TUNSETIFF, (void *) &ifr); if (status < 0) rte_panic("TAP setup error\n"); app->tap[i] = fd; } }
static int
test_ethdev_configure(void)
{
	struct rte_eth_conf null_conf;
	struct rte_eth_link link;

	memset(&null_conf, 0, sizeof(struct rte_eth_conf));

	if ((TX_PORT >= RTE_MAX_ETHPORTS) || (RX_PORT >= RTE_MAX_ETHPORTS)
			|| (RXTX_PORT >= RTE_MAX_ETHPORTS)) {
		printf(" TX/RX port exceed max eth ports\n");
		return -1;
	}
	if (rte_eth_dev_configure(TX_PORT, 1, 2, &null_conf) < 0) {
		printf("Configure failed for TX port\n");
		return -1;
	}

	/* Test queue release */
	if (rte_eth_dev_configure(TX_PORT, 1, 1, &null_conf) < 0) {
		printf("Configure failed for TX port\n");
		return -1;
	}
	if (rte_eth_dev_configure(RX_PORT, 1, 1, &null_conf) < 0) {
		printf("Configure failed for RX port\n");
		return -1;
	}
	if (rte_eth_dev_configure(RXTX_PORT, 1, 1, &null_conf) < 0) {
		printf("Configure failed for RXTX port\n");
		return -1;
	}

	if (rte_eth_tx_queue_setup(TX_PORT, 0, RING_SIZE, SOCKET0, NULL) < 0) {
		printf("TX queue setup failed\n");
		return -1;
	}
	if (rte_eth_rx_queue_setup(RX_PORT, 0, RING_SIZE, SOCKET0, NULL, mp) < 0) {
		printf("RX queue setup failed\n");
		return -1;
	}
	if (rte_eth_tx_queue_setup(RXTX_PORT, 0, RING_SIZE, SOCKET0, NULL) < 0) {
		printf("TX queue setup failed\n");
		return -1;
	}
	if (rte_eth_rx_queue_setup(RXTX_PORT, 0, RING_SIZE, SOCKET0, NULL, mp) < 0) {
		printf("RX queue setup failed\n");
		return -1;
	}

	if (rte_eth_dev_start(TX_PORT) < 0) {
		printf("Error starting TX port\n");
		return -1;
	}
	if (rte_eth_dev_start(RX_PORT) < 0) {
		printf("Error starting RX port\n");
		return -1;
	}
	if (rte_eth_dev_start(RXTX_PORT) < 0) {
		printf("Error starting RXTX port\n");
		return -1;
	}

	rte_eth_link_get(TX_PORT, &link);
	rte_eth_link_get(RX_PORT, &link);
	rte_eth_link_get(RXTX_PORT, &link);

	return 0;
}
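/*
 * The three rte_eth_link_get() calls above only exercise the API and discard
 * the result. If the test also wanted to validate the returned data, a
 * minimal field check (a sketch, not part of the original test) might be:
 */
static int check_link_fields(uint8_t port)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_link_get(port, &link);

	if (link.link_status != 0 && link.link_status != 1) {
		printf("Invalid link_status on port %u\n", (unsigned)port);
		return -1;
	}
	if (link.link_duplex != ETH_LINK_FULL_DUPLEX &&
	    link.link_duplex != ETH_LINK_HALF_DUPLEX) {
		printf("Invalid link_duplex on port %u\n", (unsigned)port);
		return -1;
	}
	return 0;
}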
/* * Initialize a given port using default settings and with the RX buffers * coming from the mbuf_pool passed as a parameter. * FIXME: Starting with assumption of one thread/core per port */ static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port, struct rte_mempool *rx_mbuf_pool, unsigned int mtu) { int retval; /* Check for a valid port */ if (port->id >= rte_eth_dev_count()) return -ENODEV; /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */ /* FIXME: Check if hw_ip_checksum is possible */ struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = mtu, .jumbo_frame = 1, .hw_ip_checksum = 1, } }; retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf); if (retval != 0) return retval; retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE, rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool); if (retval < 0) return retval; retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE, rte_eth_dev_socket_id(port->id), NULL); if (retval < 0) goto port_init_fail; /* Create the hash table for the RX sockets */ char name[32]; snprintf(name, sizeof(name), "rx_table_%u", port->id); struct rte_hash_parameters hash_params = { .name = name, .entries = UHD_DPDK_MAX_SOCKET_CNT, .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple), .hash_func = NULL, .hash_func_init_val = 0, }; port->rx_table = rte_hash_create(&hash_params); if (port->rx_table == NULL) { retval = rte_errno; goto port_init_fail; } /* Create ARP table */ snprintf(name, sizeof(name), "arp_table_%u", port->id); hash_params.name = name; hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT; hash_params.key_len = sizeof(uint32_t); hash_params.hash_func = NULL; hash_params.hash_func_init_val = 0; port->arp_table = rte_hash_create(&hash_params); if (port->arp_table == NULL) { retval = rte_errno; goto free_rx_table; } /* Set up list for TX queues */ LIST_INIT(&port->txq_list); /* Start the Ethernet port. */ retval = rte_eth_dev_start(port->id); if (retval < 0) { goto free_arp_table; } /* Display the port MAC address. 
*/ rte_eth_macaddr_get(port->id, &port->mac_addr); RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n", (unsigned)port->id, port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1], port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3], port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]); struct rte_eth_link link; rte_eth_link_get(port->id, &link); RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status); return 0; free_arp_table: rte_hash_free(port->arp_table); free_rx_table: rte_hash_free(port->rx_table); port_init_fail: return rte_errno; } static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id) { if (!ctx || !thread) return -EINVAL; unsigned int socket_id = rte_lcore_to_socket_id(id); thread->id = id; thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id]; thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id]; LIST_INIT(&thread->port_list); char name[32]; snprintf(name, sizeof(name), "sockreq_ring_%u", id); thread->sock_req_ring = rte_ring_create( name, UHD_DPDK_MAX_PENDING_SOCK_REQS, socket_id, RING_F_SC_DEQ ); if (!thread->sock_req_ring) return -ENOMEM; return 0; } int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports, int *port_thread_mapping, int num_mbufs, int mbuf_cache_size, int mtu) { /* Init context only once */ if (ctx) return 1; if ((num_ports == 0) || (port_thread_mapping == NULL)) { return -EINVAL; } /* Grabs arguments intended for DPDK's EAL */ int ret = rte_eal_init(argc, argv); if (ret < 0) rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id()); if (!ctx) return -ENOMEM; ctx->num_threads = rte_lcore_count(); if (ctx->num_threads <= 1) rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n"); /* Check that we have ports to send/receive on */ ctx->num_ports = rte_eth_dev_count(); if (ctx->num_ports < 1) rte_exit(EXIT_FAILURE, "Error: Found no ports\n"); if (ctx->num_ports < num_ports) rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n"); /* Get memory for thread and port data structures */ ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0); if (!ctx->threads) rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n"); ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0); if (!ctx->ports) rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n"); /* Initialize the thread data structures */ for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { /* Do one mempool of RX/TX per socket */ unsigned int socket_id = rte_lcore_to_socket_id(i); /* FIXME Probably want to take into account actual number of ports per socket */ if (ctx->tx_pktbuf_pools[socket_id] == NULL) { /* Creates a new mempool in memory to hold the mbufs. 
* This is done for each CPU socket */ const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM; char name[32]; snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id); ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create( name, ctx->num_ports*num_mbufs, mbuf_cache_size, 0, mbuf_size, socket_id ); snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id); ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create( name, ctx->num_ports*num_mbufs, mbuf_cache_size, 0, mbuf_size, socket_id ); if ((ctx->rx_pktbuf_pools[socket_id]== NULL) || (ctx->tx_pktbuf_pools[socket_id]== NULL)) rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); } if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0) rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i); } unsigned master_lcore = rte_get_master_lcore(); /* Assign ports to threads and initialize the port data structures */ for (unsigned int i = 0; i < num_ports; i++) { int thread_id = port_thread_mapping[i]; if (thread_id < 0) continue; if (((unsigned int) thread_id) == master_lcore) RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i); if (ctx->threads[thread_id].id != (unsigned int) thread_id) rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i); struct uhd_dpdk_port *port = &ctx->ports[i]; port->id = i; port->parent = &ctx->threads[thread_id]; ctx->threads[thread_id].num_ports++; LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry); /* Initialize port. */ if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0) rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n", i); } RTE_LOG(INFO, EAL, "Init DONE!\n"); /* FIXME: Create functions to do this */ RTE_LOG(INFO, EAL, "Starting I/O threads!\n"); for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { struct uhd_dpdk_thread *t = &ctx->threads[i]; if (!LIST_EMPTY(&t->port_list)) { rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id); } } return 0; } /* FIXME: This will be changed once we have functions to handle the threads */ int uhd_dpdk_destroy(void) { if (!ctx) return -ENODEV; struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0); if (!req) return -ENOMEM; req->req_type = UHD_DPDK_LCORE_TERM; for (int i = rte_get_next_lcore(-1, 1, 0); (i < RTE_MAX_LCORE); i = rte_get_next_lcore(i, 1, 0)) { struct uhd_dpdk_thread *t = &ctx->threads[i]; if (LIST_EMPTY(&t->port_list)) continue; if (rte_eal_get_lcore_state(t->id) == FINISHED) continue; pthread_mutex_init(&req->mutex, NULL); pthread_cond_init(&req->cond, NULL); pthread_mutex_lock(&req->mutex); if (rte_ring_enqueue(t->sock_req_ring, req)) { pthread_mutex_unlock(&req->mutex); RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i); rte_free(req); return -ENOSPC; } struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 }; pthread_cond_timedwait(&req->cond, &req->mutex, &timeout); pthread_mutex_unlock(&req->mutex); } rte_free(req); return 0; }
int main(int argc, char *argv[])
{
	/* Disable core dumps for this tool: rte_eal_init() can abort in
	 * places where a core dump is not wanted. */
	struct rlimit rlim;
	int rc = getrlimit(RLIMIT_CORE, &rlim);
	if (rc < 0) {
		error(255, errno, "getrlimit(RLIMIT_CORE) failed");
	} else {
		rlim.rlim_cur = 0;
		rc = setrlimit(RLIMIT_CORE, &rlim);
		if (rc < 0) {
			error(255, errno, "setrlimit(RLIMIT_CORE) failed");
		}
	}

	/* Initialize DPDK. No args are really needed, but any args passed to
	 * this tool are simply forwarded to DPDK. */
	argv[0] = "dpdk";
	rc = rte_eal_init(argc, argv);
	if (rc < 0) {
		error(255, 0, "rte_eal_init() failed, rte_errno=%d, %s",
		      rte_errno, rte_strerror(rte_errno));
	}

	struct rte_eth_dev_info dev_info;
	struct ether_addr eth_addr;
	struct rte_eth_link link;
	unsigned port_id;
	uint32_t num_ports = rte_eth_dev_count();
	uint32_t if_index;
	char ifname[IF_NAMESIZE];

	printf("%d EAL ports available.\n", num_ports);

	for (port_id = 0; port_id < num_ports; port_id++) {
		rte_eth_dev_info_get(port_id, &dev_info);
		rte_eth_macaddr_get(port_id, &eth_addr);
		rte_eth_link_get(port_id, &link);

		if_index = dev_info.if_index;
		if (if_indextoname(if_index, ifname) == NULL) {
			strcpy(ifname, "<none>");
		}

		printf("port %d interface-index: %d\n", port_id, if_index);
		printf("port %d interface: %s\n", port_id, ifname);
		printf("port %d driver: %s\n", port_id, dev_info.driver_name);
		printf("port %d mac-addr: %02x:%02x:%02x:%02x:%02x:%02x\n", port_id,
		       eth_addr.addr_bytes[0], eth_addr.addr_bytes[1],
		       eth_addr.addr_bytes[2], eth_addr.addr_bytes[3],
		       eth_addr.addr_bytes[4], eth_addr.addr_bytes[5]);
		printf("port %d pci-bus-addr: %04x:%02x:%02x.%x\n", port_id,
		       dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus,
		       dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function);
		printf("port %d socket: %d\n", port_id, rte_eth_dev_socket_id(port_id));

		if (link.link_status) {
			printf("port %d link-state: up\n", port_id);
			printf("port %d link-speed: %u\n", port_id,
			       (unsigned)link.link_speed);
			if (link.link_duplex == ETH_LINK_FULL_DUPLEX) {
				printf("port %d link-duplex: full-duplex\n", port_id);
			} else {
				printf("port %d link-duplex: half-duplex\n", port_id);
			}
		} else {
			printf("port %d link-state: down\n", port_id);
			printf("port %d link-speed: -1\n", port_id);
			printf("port %d link-duplex: <n/a>\n", port_id);
		}
		printf("\n");
	}

	return 0;
}
static void app_init_nics(void) { uint32_t socket, lcore; uint8_t port, queue; struct ether_addr mac_addr; int ret; /* Init driver */ printf("Initializing the PMD driver ...\n"); #ifdef RTE_LIBRTE_IGB_PMD if (rte_igb_pmd_init() < 0) { rte_panic("Cannot init IGB PMD\n"); } #endif #ifdef RTE_LIBRTE_IXGBE_PMD if (rte_ixgbe_pmd_init() < 0) { rte_panic("Cannot init IXGBE PMD\n"); } #endif if (rte_eal_pci_probe() < 0) { rte_panic("Cannot probe PCI\n"); } memset(port_stat,0,sizeof(struct port_stat)*MAX_PORT_NUM); /* Init NIC ports and queues, then start the ports */ for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { struct rte_eth_link link; struct rte_mempool *pool; uint32_t n_rx_queues, n_tx_queues; n_rx_queues = app_get_nic_rx_queues_per_port(port); n_tx_queues = app.nic_tx_port_mask[port]; if ((n_rx_queues == 0) && (n_tx_queues == 0)) { continue; } /* Init port */ printf("Initializing NIC port %u ...\n", (uint32_t) port); ret = rte_eth_dev_configure( port, (uint8_t) n_rx_queues, (uint8_t) n_tx_queues, &port_conf); if (ret < 0) { rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret); } rte_eth_promiscuous_enable(port); /* Init RX queues */ for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { if (app.nic_rx_queue_mask[port][queue] == 0) { continue; } app_get_lcore_for_nic_rx(port, queue, &lcore); socket = rte_lcore_to_socket_id(lcore); pool = app.lcore_params[lcore].pool; printf("Initializing NIC port %u RX queue %u ...\n", (uint32_t) port, (uint32_t) queue); ret = rte_eth_rx_queue_setup( port, queue, (uint16_t) app.nic_rx_ring_size, socket, &rx_conf, pool); if (ret < 0) { rte_panic("Cannot init RX queue %u for port %u (%d)\n", (uint32_t) queue, (uint32_t) port, ret); } } /* Init TX queues */ if (app.nic_tx_port_mask[port] == 1) { app_get_lcore_for_nic_tx(port, &lcore); socket = rte_lcore_to_socket_id(lcore); printf("Initializing NIC port %u TX queue 0 ...\n", (uint32_t) port); ret = rte_eth_tx_queue_setup( port, 0, (uint16_t) app.nic_tx_ring_size, socket, &tx_conf); if (ret < 0) { rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, ret); } } /* Start port */ ret = rte_eth_dev_start(port); if (ret < 0) { rte_panic("Cannot start port %d (%d)\n", port, ret); } /* Get link status */ rte_eth_link_get(port, &link); rte_eth_macaddr_get(port,&mac_addr); int i=0; for(i=0;i<5;i++) printf("%02x:",mac_addr.addr_bytes[i]); printf("%02x\n",mac_addr.addr_bytes[i]); memcpy(port_stat[port].mac_addr,mac_addr.addr_bytes,6); for(i=0;i<5;i++) printf("%02x:",port_stat[port].mac_addr[i]); printf("%02x\n",port_stat[port].mac_addr[i]); if (link.link_status) { printf("Port %u is UP (%u Mbps)\n", (uint32_t) port, (unsigned) link.link_speed); port_stat[port].port_status=1; port_stat[port].port_speed=link.link_speed; } else { printf("Port %u is DOWN\n", (uint32_t) port); port_stat[port].port_status=0; } } }