/*
 * Set up the hierarchical scheduler attached to the given ethernet port.
 *
 * The port-level rate is derived from the current link speed (reported in
 * Mbps, converted to bytes/s), after which every subport and every pipe
 * with a profile mapping is configured. Any configuration failure is fatal
 * and terminates the application through rte_exit().
 *
 * Returns the configured scheduler port handle (never NULL).
 */
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	/* static: the global port_params keeps a pointer into this buffer */
	static char port_name[32];
	struct rte_eth_link link;
	struct rte_sched_port *sched;
	uint32_t subport_id;

	rte_eth_link_get((uint8_t)portid, &link);

	/* link_speed is in Mbps: Mbps * 10^6 / 8 = bytes per second */
	port_params.socket = socketid;
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	rte_snprintf(port_name, sizeof(port_name), "port_%d", portid);
	port_params.name = port_name;

	sched = rte_sched_port_config(&port_params);
	if (sched == NULL) {
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	}

	for (subport_id = 0;
			subport_id < port_params.n_subports_per_port;
			subport_id++) {
		uint32_t pipe_id;
		int status;

		status = rte_sched_subport_config(sched, subport_id,
				&subport_params[subport_id]);
		if (status) {
			rte_exit(EXIT_FAILURE,
					"Unable to config sched subport %u, err=%d\n",
					subport_id, status);
		}

		for (pipe_id = 0;
				pipe_id < port_params.n_pipes_per_subport;
				pipe_id++) {
			int profile = app_pipe_to_profile[subport_id][pipe_id];

			/* -1 marks a pipe with no profile mapping: skip it */
			if (profile == -1)
				continue;

			status = rte_sched_pipe_config(sched, subport_id,
					pipe_id, profile);
			if (status) {
				rte_exit(EXIT_FAILURE,
						"Unable to config sched pipe %u "
						"for profile %d, err=%d\n",
						pipe_id, profile, status);
			}
		}
	}

	return sched;
}
/** * test main entrance for library sched */ static int test_sched(void) { struct rte_mempool *mp = NULL; struct rte_sched_port *port = NULL; uint32_t pipe; struct rte_mbuf *in_mbufs[10]; struct rte_mbuf *out_mbufs[10]; int i; int err; mp = create_mempool(); port_param.socket = 0; port_param.rate = (uint64_t) 10000 * 1000 * 1000 / 8; port = rte_sched_port_config(&port_param); VERIFY(port != NULL, "Error config sched port\n"); err = rte_sched_subport_config(port, SUBPORT, subport_param); VERIFY(err == 0, "Error config sched, err=%d\n", err); for (pipe = 0; pipe < port_param.n_pipes_per_subport; pipe ++) { err = rte_sched_pipe_config(port, SUBPORT, pipe, 0); VERIFY(err == 0, "Error config sched pipe %u, err=%d\n", pipe, err); } for (i = 0; i < 10; i++) { in_mbufs[i] = rte_pktmbuf_alloc(mp); prepare_pkt(in_mbufs[i]); } err = rte_sched_port_enqueue(port, in_mbufs, 10); VERIFY(err == 10, "Wrong enqueue, err=%d\n", err); err = rte_sched_port_dequeue(port, out_mbufs, 10); VERIFY(err == 10, "Wrong dequeue, err=%d\n", err); for (i = 0; i < 10; i++) { enum rte_meter_color color; uint32_t subport, traffic_class, queue; color = rte_sched_port_pkt_read_color(out_mbufs[i]); VERIFY(color == e_RTE_METER_YELLOW, "Wrong color\n"); rte_sched_port_pkt_read_tree_path(out_mbufs[i], &subport, &pipe, &traffic_class, &queue); VERIFY(subport == SUBPORT, "Wrong subport\n"); VERIFY(pipe == PIPE, "Wrong pipe\n"); VERIFY(traffic_class == TC, "Wrong traffic_class\n"); VERIFY(queue == QUEUE, "Wrong queue\n"); } struct rte_sched_subport_stats subport_stats; uint32_t tc_ov; rte_sched_subport_read_stats(port, SUBPORT, &subport_stats, &tc_ov); //VERIFY(subport_stats.n_pkts_tc[TC-1] == 10, "Wrong subport stats\n"); struct rte_sched_queue_stats queue_stats; uint16_t qlen; rte_sched_queue_read_stats(port, QUEUE, &queue_stats, &qlen); //VERIFY(queue_stats.n_pkts == 10, "Wrong queue stats\n"); rte_sched_port_free(port); return 0; }
/* Steer ARP frames (by EtherType) to the link's dedicated ARP RX queue. */
static inline int
app_link_filter_arp_add(struct app_link_params *link)
{
	struct rte_eth_ethertype_filter filter = {
		.ether_type = ETHER_TYPE_ARP,
		.flags = 0,
		.queue = link->arp_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_ETHERTYPE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Steer TCP SYN packets to the link's dedicated SYN RX queue. */
static inline int
app_link_filter_tcp_syn_add(struct app_link_params *link)
{
	struct rte_eth_syn_filter filter = {
		.hig_pri = 1,
		.queue = link->tcp_syn_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_SYN,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/*
 * Add a 5-tuple filter on link l1 matching any IP packet whose destination
 * is l2's local IP, steering it to l1's local-IP RX queue.
 */
static inline int
app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Remove the filter installed by app_link_filter_ip_add(). */
static inline int
app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/*
 * Add a 5-tuple filter on link l1 matching TCP packets destined to l2's
 * local IP, steering them to l1's local-TCP RX queue. Priority 2 wins over
 * the priority-1 plain-IP filter for the same destination.
 */
static inline int
app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Remove the filter installed by app_link_filter_tcp_add(). */
static inline int
app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Same as app_link_filter_tcp_add() but for UDP traffic. */
static inline int
app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Remove the filter installed by app_link_filter_udp_add(). */
static inline int
app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Same as app_link_filter_tcp_add() but for SCTP traffic. */
static inline int
app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Remove the filter installed by app_link_filter_sctp_add(). */
static inline int
app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Install the ARP filter on a link, if an ARP queue is configured (panics
 * on failure). Queue 0 means "no dedicated queue" and skips installation. */
static void
app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->arp_q != 0) {
		int status = app_link_filter_arp_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding ARP filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->arp_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding ARP filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->arp_q, status);
	}
}

/* Install the TCP SYN filter on a link, if a SYN queue is configured. */
static void
app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->tcp_syn_q != 0) {
		int status = app_link_filter_tcp_syn_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding TCP SYN filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->tcp_syn_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding TCP SYN filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->tcp_syn_q, status);
	}
}

/*
 * Bring a link UP: install IP/TCP/UDP/SCTP local-queue filters for cp's IP
 * on every link that has the corresponding local queue, ask the PMD to set
 * the link up, and mark the link state. Any filter error is fatal.
 */
void
app_link_up_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* For each link, add filters for IP of current link */
	if (cp->ip != 0) {
		for (i = 0; i < app->n_links; i++) {
			struct app_link_params *p = &app->link_params[i];

			/* IP */
			if (p->ip_local_q != 0) {
				int status = app_link_filter_ip_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding IP filter (queue= %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding IP "
						"filter (queue= %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->ip_local_q, cp->ip, status);
			}

			/* TCP */
			if (p->tcp_local_q != 0) {
				int status = app_link_filter_tcp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding TCP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->tcp_local_q, cp->ip, status);
			}

			/* UDP */
			if (p->udp_local_q != 0) {
				int status = app_link_filter_udp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding UDP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->udp_local_q, cp->ip, status);
			}

			/* SCTP */
			if (p->sctp_local_q != 0) {
				int status = app_link_filter_sctp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32
					"): Adding SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding SCTP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->sctp_local_q, cp->ip,
						status);
			}
		}
	}

	/* PMD link up */
	status = rte_eth_dev_set_link_up(cp->pmd_id);
	/* Do not panic if PMD does not provide link up functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link up error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as UP */
	cp->state = 1;
}

/*
 * Bring a link DOWN: ask the PMD to set the link down, clear the link
 * state, then remove the IP/TCP/UDP/SCTP filters previously installed by
 * app_link_up_internal() (skipped when cp has no IP assigned).
 */
void
app_link_down_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* PMD link down */
	status = rte_eth_dev_set_link_down(cp->pmd_id);
	/* Do not panic if PMD does not provide link down functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link down error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as DOWN */
	cp->state = 0;

	/* Return if current link IP is not valid */
	if (cp->ip == 0)
		return;

	/* For each link, remove filters for IP of current link */
	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];

		/* IP */
		if (p->ip_local_q != 0) {
			int status = app_link_filter_ip_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting IP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->ip_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting IP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32 ") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip, status);
		}

		/* TCP */
		if (p->tcp_local_q != 0) {
			int status = app_link_filter_tcp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting TCP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->tcp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32 ") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip, status);
		}

		/* UDP */
		if (p->udp_local_q != 0) {
			int status = app_link_filter_udp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting UDP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->udp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32 ") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip, status);
		}

		/* SCTP */
		if (p->sctp_local_q != 0) {
			int status = app_link_filter_sctp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting SCTP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->sctp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32 ") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip, status);
		}
	}
}

/* Log the status of every link and panic unless all links report UP. */
static void
app_check_link(struct app_params *app)
{
	uint32_t all_links_up, i;

	all_links_up = 1;

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];
		struct rte_eth_link link_params;

		memset(&link_params, 0, sizeof(link_params));
		rte_eth_link_get(p->pmd_id, &link_params);

		APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
			p->name,
			p->pmd_id,
			link_params.link_speed / 1000,
			link_params.link_status ? "UP" : "DOWN");

		if (link_params.link_status == ETH_LINK_DOWN)
			all_links_up = 0;
	}

	if (all_links_up == 0)
		rte_panic("Some links are DOWN\n");
}

/* Return 1 if any SWQ has IPv4/IPv6 fragmentation or reassembly enabled. */
static uint32_t
is_any_swq_frag_or_ras(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];

		if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
			(p->ipv4_ras == 1) || (p->ipv6_ras == 1))
			return 1;
	}

	return 0;
}

/* When frag/reassembly is in use, allow multi-segment TX on every HW TXQ
 * (clears the NOMULTSEGS flag so segmented mbufs can be transmitted). */
static void
app_init_link_frag_ras(struct app_params *app)
{
	uint32_t i;

	if (is_any_swq_frag_or_ras(app)) {
		for (i = 0; i < app->n_pktq_hwq_out; i++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[i];

			p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
		}
	}
}

/* NUMA socket of an ethdev; falls back to socket 0 when unknown. */
static inline int
app_get_cpu_socket_id(uint32_t pmd_id)
{
	int status = rte_eth_dev_socket_id(pmd_id);

	return (status != SOCKET_ID_ANY) ? status : 0;
}

/* Return 1 when the link has at least one RSS queue configured. */
static inline int
app_link_rss_enabled(struct app_link_params *cp)
{
	return (cp->n_rss_qs) ? 1 : 0;
}

/*
 * Program the NIC RSS redirection table (RETA) so that table entries cycle
 * round-robin over the link's configured RSS queues. Panics on any error.
 */
static void
app_link_rss_setup(struct app_link_params *cp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
	uint32_t i;
	int status;

	/* Get RETA size */
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(cp->pmd_id, &dev_info);

	if (dev_info.reta_size == 0)
		rte_panic("%s (%u): RSS setup error (null RETA size)\n",
			cp->name, cp->pmd_id);

	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
		rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
			cp->name, cp->pmd_id);

	/* Setup RETA contents */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < dev_info.reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < dev_info.reta_size; i++) {
		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
		/* round-robin assignment of RSS queues to RETA entries */
		uint32_t rss_qs_pos = i % cp->n_rss_qs;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) cp->rss_qs[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(cp->pmd_id,
		reta_conf,
		dev_info.reta_size);
	if (status != 0)
		rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
			cp->name, cp->pmd_id);
}

/* Enable RSS in the port config when the link has RSS queues. */
static void
app_init_link_set_config(struct app_link_params *p)
{
	if (p->n_rss_qs) {
		p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
			p->rss_proto_ipv6 |
			p->rss_proto_l2;
	}
}

/*
 * Initialize every LINK: configure the ethdev, set up all RX/TX queues
 * that name this link, start the device, install filters, set up RSS,
 * bring the link up, and finally verify that all links report UP.
 */
static void
app_init_link(struct app_params *app)
{
	uint32_t i;

	app_init_link_frag_ras(app);

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p_link = &app->link_params[i];
		uint32_t link_id, n_hwq_in, n_hwq_out, j;
		int status;

		/* link index is encoded in the name, e.g. "LINK3" */
		sscanf(p_link->name, "LINK%" PRIu32, &link_id);
		n_hwq_in = app_link_get_n_rxq(app, p_link);
		n_hwq_out = app_link_get_n_txq(app, p_link);
		app_init_link_set_config(p_link);

		APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
			"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
			p_link->name,
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out);

		/* LINK */
		status = rte_eth_dev_configure(
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out,
			&p_link->conf);
		if (status < 0)
			rte_panic("%s (%" PRId32 "): "
				"init error (%" PRId32 ")\n",
				p_link->name, p_link->pmd_id, status);

		rte_eth_macaddr_get(p_link->pmd_id,
			(struct ether_addr *) &p_link->mac_addr);

		if (p_link->promisc)
			rte_eth_promiscuous_enable(p_link->pmd_id);

		/* RXQ */
		for (j = 0; j < app->n_pktq_hwq_in; j++) {
			struct app_pktq_hwq_in_params *p_rxq =
				&app->hwq_in_params[j];
			uint32_t rxq_link_id, rxq_queue_id;
			uint16_t nb_rxd = p_rxq->size;

			/* RXQ names look like "RXQ<link>.<queue>" */
			sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
				&rxq_link_id,
				&rxq_queue_id);
			if (rxq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				&nb_rxd,
				NULL);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Rx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);

			status = rte_eth_rx_queue_setup(
				p_link->pmd_id,
				rxq_queue_id,
				nb_rxd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_rxq->conf,
				app->mempool[p_rxq->mempool_id]);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);
		}

		/* TXQ */
		for (j = 0; j < app->n_pktq_hwq_out; j++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[j];
			uint32_t txq_link_id, txq_queue_id;
			uint16_t nb_txd = p_txq->size;

			/* TXQ names look like "TXQ<link>.<queue>" */
			sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
				&txq_link_id,
				&txq_queue_id);
			if (txq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				NULL,
				&nb_txd);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Tx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);

			status = rte_eth_tx_queue_setup(
				p_link->pmd_id,
				txq_queue_id,
				nb_txd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_txq->conf);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);
		}

		/* LINK START */
		status = rte_eth_dev_start(p_link->pmd_id);
		if (status < 0)
			rte_panic("Cannot start %s (error %" PRId32 ")\n",
				p_link->name, status);

		/* LINK FILTERS */
		app_link_set_arp_filter(app, p_link);
		app_link_set_tcp_syn_filter(app, p_link);
		if (app_link_rss_enabled(p_link))
			app_link_rss_setup(p_link);

		/* LINK UP */
		app_link_up_internal(app, p_link);
	}

	app_check_link(app);
}

/*
 * Create one rte_ring per SWQ. Single-consumer/single-producer flags are
 * set when exactly one reader/writer is attached, enabling the lockless
 * fast path. Panics on ring creation failure.
 */
static void
app_init_swq(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];
		unsigned flags = 0;

		if (app_swq_get_readers(app, p) == 1)
			flags |= RING_F_SC_DEQ;
		if (app_swq_get_writers(app, p) == 1)
			flags |= RING_F_SP_ENQ;

		APP_LOG(app, HIGH, "Initializing %s...", p->name);
		app->swq[i] = rte_ring_create(
			p->name,
			p->size,
			p->cpu_socket_id,
			flags);

		if (app->swq[i] == NULL)
			rte_panic("%s init error\n", p->name);
	}
}

/*
 * Create one traffic-manager (rte_sched) port per TM pktq. The scheduler
 * rate is derived from the link speed of the associated LINK; subports and
 * mapped pipes are then configured. Panics on any error.
 */
static void
app_init_tm(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tm; i++) {
		struct app_pktq_tm_params *p_tm = &app->tm_params[i];
		struct app_link_params *p_link;
		struct rte_eth_link link_eth_params;
		struct rte_sched_port *sched;
		uint32_t n_subports, subport_id;
		int status;

		p_link = app_get_link_for_tm(app, p_tm);
		/* LINK */
		rte_eth_link_get(p_link->pmd_id, &link_eth_params);

		/* TM */
		p_tm->sched_port_params.name = p_tm->name;
		p_tm->sched_port_params.socket =
			app_get_cpu_socket_id(p_link->pmd_id);
		/* link_speed is in Mbps: Mbps * 10^6 / 8 = bytes/s */
		p_tm->sched_port_params.rate =
			(uint64_t) link_eth_params.link_speed
			* 1000 * 1000 / 8;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
		sched = rte_sched_port_config(&p_tm->sched_port_params);
		if (sched == NULL)
			rte_panic("%s init error\n", p_tm->name);
		app->tm[i] = sched;

		/* Subport */
		n_subports = p_tm->sched_port_params.n_subports_per_port;
		for (subport_id = 0; subport_id < n_subports; subport_id++) {
			uint32_t n_pipes_per_subport, pipe_id;

			status = rte_sched_subport_config(sched,
				subport_id,
				&p_tm->sched_subport_params[subport_id]);
			if (status)
				rte_panic("%s subport %" PRIu32
					" init error (%" PRId32 ")\n",
					p_tm->name, subport_id, status);

			/* Pipe */
			n_pipes_per_subport =
				p_tm->sched_port_params.n_pipes_per_subport;
			for (pipe_id = 0;
				pipe_id < n_pipes_per_subport;
				pipe_id++) {
				/* pipe-to-profile table is flattened:
				 * row = subport, column = pipe */
				int profile_id = p_tm->sched_pipe_to_profile[
					subport_id * APP_MAX_SCHED_PIPES +
					pipe_id];

				if (profile_id == -1)
					continue;

				status = rte_sched_pipe_config(sched,
					subport_id,
					pipe_id,
					profile_id);
				if (status)
					rte_panic("%s subport %" PRIu32
						" pipe %" PRIu32
						" (profile %" PRId32 ") "
						"init error (% " PRId32 ")\n",
						p_tm->name, subport_id,
						pipe_id, profile_id, status);
			}
		}
	}
}

#ifndef RTE_EXEC_ENV_LINUXAPP
/* TAP devices require Linux; refuse configuration on other platforms. */
static void
app_init_tap(struct app_params *app)
{
	if (app->n_pktq_tap == 0)
		return;

	rte_panic("TAP device not supported.\n");
}
#else
/*
 * Create one kernel TAP interface per TAP pktq via /dev/net/tun and store
 * the non-blocking file descriptor in app->tap[]. Panics on any failure.
 */
static void
app_init_tap(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tap; i++) {
		struct app_pktq_tap_params *p_tap = &app->tap_params[i];
		struct ifreq ifr;
		int fd, status;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);

		fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
		if (fd < 0)
			rte_panic("Cannot open file /dev/net/tun\n");

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
		snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);

		status = ioctl(fd, TUNSETIFF, (void *) &ifr);
		if (status < 0)
			rte_panic("TAP setup error\n");

		app->tap[i] = fd;
	}
}
/* Initialize cores and allocate mempools */ static void init_lcores(void) { char name[64]; struct lcore_cfg *lconf = 0; static uint8_t *worker_thread_table[MAX_SOCKETS] = {0}; static uint16_t *user_table[MAX_SOCKETS] = {0}; struct rte_lpm *ipv4_lpm[MAX_SOCKETS] = {0}; struct rte_hash *qinq_to_gre_lookup[MAX_SOCKETS] = {0}; struct next_hop_struct *next_hop[MAX_SOCKETS] = {0}; /* need to allocate mempools as the first thing to use the lowest possible address range */ setup_mempools(lcore_cfg_init); lcore_cfg = rte_zmalloc_socket("lcore_cfg_hp", RTE_MAX_LCORE * sizeof(struct lcore_cfg), CACHE_LINE_SIZE, rte_socket_id()); TGEN_PANIC(lcore_cfg == NULL, "Could not allocate memory for core control structures\n"); rte_memcpy(lcore_cfg, lcore_cfg_init, RTE_MAX_LCORE * sizeof(struct lcore_cfg)); init_lcore_info(); check_no_mode_core(); mprintf("=== Initializing rings on cores ===\n"); init_rings(); for (uint8_t socket_id = 0; socket_id < MAX_SOCKETS; ++socket_id) { uint16_t data_structs_flags = data_structs_needed(lconf, socket_id); if (data_structs_flags & DATA_STRUCTS_NEED_WT_TABLE) { worker_thread_table[socket_id] = rte_zmalloc_socket(NULL , 0x1000000, CACHE_LINE_SIZE, socket_id); TGEN_PANIC(worker_thread_table == NULL, "Error creating worker thread table"); } if (data_structs_flags & DATA_STRUCTS_NEED_GRE_TABLE) { mprintf("=== user <-> QinQ table configuration ===\n"); qinq_to_gre_lookup[socket_id] = read_gre_table_config(config_path, "gre_table.cfg", worker_thread_table[socket_id], lb_nb_txrings, socket_id); TGEN_PANIC(NULL == qinq_to_gre_lookup[socket_id], "Failed to allocate qinq to gre lookup table\n"); } if (data_structs_flags & DATA_STRUCTS_NEED_USER_TABLE) { mprintf("=== User table configuration ===\n"); user_table[socket_id] = read_user_table_config(config_path, "user_table.cfg", &qinq_to_gre_lookup[socket_id], socket_id); TGEN_PANIC(NULL == user_table[socket_id], "Failed to allocate user lookup table\n"); } if (data_structs_flags & DATA_STRUCTS_NEED_NEXT_HOP) { 
mprintf("=== Next hop configuration ===\n"); next_hop[socket_id] = read_next_hop_config(config_path, "next_hop.cfg", &tgen_used_port_mask, socket_id); init_routing_ports(); } if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V4) { mprintf("=== IPv4 routing configuration ===\n"); ipv4_lpm[socket_id] = read_lpm_v4_config(config_path, "ipv4.cfg", socket_id); TGEN_PANIC(NULL == ipv4_lpm[socket_id], "Failed to allocate IPv4 LPM\n"); } if (data_structs_flags & DATA_STRUCTS_NEED_LPM_V6) { mprintf("=== IPv6 routing configuration ===\n"); read_lpm_v6_config(config_path, "ipv6.cfg", socket_id); } } check_consistent_cfg(); mprintf("=== Initializing tables, mempools and queue numbers on cores ===\n"); for (uint8_t lcore_id = 0; lcore_id < RTE_MAX_LCORE; ++lcore_id) { if (!rte_lcore_is_enabled(lcore_id) || lcore_id == tgen_cfg.master) { continue; } lconf = &lcore_cfg[lcore_id]; uint8_t socket = rte_lcore_to_socket_id(lcore_id); for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) { struct task_startup_cfg *startup_cfg = &lconf->startup_cfg[task_id]; if (QOS == startup_cfg->mode) { rte_snprintf(name, sizeof(name), "qos_sched_port_%u_%u", lcore_id, task_id); startup_cfg->qos_conf.port_params.name = name; startup_cfg->qos_conf.port_params.socket = socket; startup_cfg->qos_conf.port_params.rate = TEN_GIGABIT; startup_cfg->sched_port = rte_sched_port_config(&startup_cfg->qos_conf.port_params); TGEN_PANIC(startup_cfg->sched_port == NULL, "failed to create sched_port"); mprintf("number of pipes: %d\n\n", startup_cfg->qos_conf.port_params.n_pipes_per_subport); int err = rte_sched_subport_config(startup_cfg->sched_port, 0, startup_cfg->qos_conf.subport_params); TGEN_PANIC(err != 0, "Failed setting up sched_port subport, error: %d", err); /* only single subport and single pipe profile is supported */ for (uint32_t pipe = 0; pipe < startup_cfg->qos_conf.port_params.n_pipes_per_subport; ++pipe) { err = rte_sched_pipe_config(startup_cfg->sched_port, 0 , pipe, 0); TGEN_PANIC(err != 
0, "failed setting up sched port pipe, error: %d", err); } } if (LB_QINQ == startup_cfg->mode) { startup_cfg->worker_thread_table = worker_thread_table[rte_socket_id()]; } if (QINQ_DECAP_ARP == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) { startup_cfg->qinq_gre = qinq_to_gre_lookup[rte_socket_id()]; } if (QOS == startup_cfg->mode || CLASSIFY == startup_cfg->mode || QINQ_DECAP_V6 == startup_cfg->mode) { startup_cfg->user_table = user_table[rte_socket_id()]; } if (ROUTING == startup_cfg->mode || FWD == startup_cfg->mode || QINQ_DECAP_V4 == startup_cfg->mode) { startup_cfg->next_hop = next_hop[rte_socket_id()]; } if (QINQ_DECAP_V4 == startup_cfg->mode || FWD == startup_cfg->mode || ROUTING == startup_cfg->mode) { startup_cfg->ipv4_lpm = ipv4_lpm[rte_socket_id()]; } } mprintf("\t*** Initializing core %u ***\n", lcore_id); if (lconf->flags & PCFG_CPETABLEv4) { sprintf(name, "core_%u_CPEv4Table", lcore_id); uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads; if (!rte_is_power_of_2(table_part)) { table_part = rte_align32pow2(table_part) >> 1; } struct rte_hash_parameters hash_params = { .name = name, .entries = MAX_GRE / table_part, .bucket_entries = GRE_BUCKET_ENTRIES, .key_len = sizeof(struct hash_gre_struct), .entry_len = sizeof(struct cpe_table_hash_entry), .hash_func_init_val = 0, .socket_id = socket }; lconf->cpe_v4_table = rte_hash_ext_create(&hash_params); TGEN_PANIC(lconf->cpe_v4_table == NULL, "Unable to allocate memory for IPv4 hash table on core %u\n", lcore_id); /* set all entries to expire at MAX_TSC (i.e. 
never) so that we don't waste cycles at startup going through all the empty entries */ setup_arp_entries(lconf->cpe_v4_table); /* for locality, copy the pointer to the port structure where it is needed at packet handling time */ for (uint8_t task_id = 0; task_id < lconf->nb_tasks; ++task_id) { if (lconf->startup_cfg[task_id].flags & PORT_STARTUP_CPEv4) { lconf->startup_cfg[task_id].cpe_table = lconf->cpe_v4_table; } } } if (lconf->flags & PCFG_CPETABLEv6) { sprintf(name, "core_%u_CPEv6Table", lcore_id); uint8_t table_part = lconf->startup_cfg[0].nb_slave_threads; if (!rte_is_power_of_2(table_part)) { table_part = rte_align32pow2(table_part) >> 1; }