int fdir_callback_add_rules(struct fdir_parsing* filter_tmp){ /** * struct fdir_parsing { * int port; * enum fdir_parsing_cmd cmd; * struct rte_eth_fdir_filter filter; * }; **/ int return_value ; int port_id = filter_tmp->port; if (( main_information.port_mask & (1 << port_id)) != 0) { return_value = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &filter_tmp->filter); if (return_value != 0 ){ RTE_LOG(ERR, PORT, "error adding FDIR rules, error %i (%s)\n", rte_errno, strerror(rte_errno)); return rte_errno; } }//endif return 0; }
int fdir_init(uint8_t port_id){ int return_value; struct rte_eth_fdir_info fdir_info; /* check fdir card support */ return_value = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); if (return_value < 0) { RTE_LOG(CRIT,PORT,"FDIR not supported on port %-2d\n",port_id); return -1; } /* check filter is supported */ return_value = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_NOP, NULL ); rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_INFO, &fdir_info); /* check fdir is activated */ if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) { printf("MODE PERFECT RULE ENABLED\n"); } else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) { printf("MODE SIGNATURE ENABLED\n"); } else { RTE_LOG(CRIT,PORT,"FDIR not enabled on port %-2d\n",port_id); return -1; } if (return_value < 0){ RTE_LOG(ERR, PORT, "error FDIR : FILTER not supported," "error %i (%s)\n", rte_errno, strerror(rte_errno)); return rte_errno; } return 0; }
/** * Removes cloud filter. Ensures that nothing is adding buffers to the RX * queue before disabling RX on the device. */ void vxlan_unlink(struct vhost_dev *vdev) { unsigned i = 0, rx_count; int ret; struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; struct rte_eth_tunnel_filter_conf tunnel_filter_conf; if (vdev->ready == DEVICE_RX) { memset(&tunnel_filter_conf, 0, sizeof(struct rte_eth_tunnel_filter_conf)); ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac); ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac); tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q]; tunnel_filter_conf.filter_type = tep_filter_type[filter_idx]; if (tep_filter_type[filter_idx] == RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID) tunnel_filter_conf.inner_vlan = INNER_VLAN_ID; tunnel_filter_conf.queue_id = vdev->rx_q; tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN; ret = rte_eth_dev_filter_ctrl(ports[0], RTE_ETH_FILTER_TUNNEL, RTE_ETH_FILTER_DELETE, &tunnel_filter_conf); if (ret) { RTE_LOG(ERR, VHOST_DATA, "%d Failed to add device MAC address to cloud filter\n", vdev->rx_q); return; } for (i = 0; i < ETHER_ADDR_LEN; i++) vdev->mac_address.addr_bytes[i] = 0; /* Clear out the receive buffers */ rx_count = rte_eth_rx_burst(ports[0], (uint16_t)vdev->rx_q, pkts_burst, MAX_PKT_BURST); while (rx_count) { for (i = 0; i < rx_count; i++) rte_pktmbuf_free(pkts_burst[i]); rx_count = rte_eth_rx_burst(ports[0], (uint16_t)vdev->rx_q, pkts_burst, MAX_PKT_BURST); } vdev->ready = DEVICE_MAC_LEARNING; } }
/* Add an ETHERTYPE filter steering ARP frames to the link's ARP queue. */
static inline int
app_link_filter_arp_add(struct app_link_params *link)
{
	struct rte_eth_ethertype_filter filter = {
		.ether_type = ETHER_TYPE_ARP,
		.flags = 0,
		.queue = link->arp_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_ETHERTYPE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Add a SYN filter steering TCP SYN packets (high priority) to tcp_syn_q. */
static inline int
app_link_filter_tcp_syn_add(struct app_link_params *link)
{
	struct rte_eth_syn_filter filter = {
		.hig_pri = 1,
		.queue = link->tcp_syn_q,
	};

	return rte_eth_dev_filter_ctrl(link->pmd_id,
		RTE_ETH_FILTER_SYN,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/*
 * Add, on link l1, a 5-tuple filter matching destination IP l2->ip only
 * (all other fields wildcarded), steering to l1's local IP queue.
 */
static inline int
app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Delete the filter installed by app_link_filter_ip_add() (same match). */
static inline int
app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = 0,
		.proto_mask = 0, /* Disable */
		.tcp_flags = 0,
		.priority = 1, /* Lowest */
		.queue = l1->ip_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/*
 * Add, on link l1, a 5-tuple filter matching TCP traffic to l2->ip,
 * steering to l1's local TCP queue. Priority 2 beats the plain IP filter.
 */
static inline int
app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Delete the filter installed by app_link_filter_tcp_add() (same match). */
static inline int
app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_TCP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->tcp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Same as the TCP variant, for UDP traffic to l1's local UDP queue. */
static inline int
app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Delete the filter installed by app_link_filter_udp_add() (same match). */
static inline int
app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->udp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Same as the TCP variant, for SCTP traffic to l1's local SCTP queue. */
static inline int
app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_ADD,
		&filter);
}

/* Delete the filter installed by app_link_filter_sctp_add() (same match). */
static inline int
app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
{
	struct rte_eth_ntuple_filter filter = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_bswap32(l2->ip),
		.dst_ip_mask = UINT32_MAX, /* Enable */
		.src_ip = 0,
		.src_ip_mask = 0, /* Disable */
		.dst_port = 0,
		.dst_port_mask = 0, /* Disable */
		.src_port = 0,
		.src_port_mask = 0, /* Disable */
		.proto = IPPROTO_SCTP,
		.proto_mask = UINT8_MAX, /* Enable */
		.tcp_flags = 0,
		.priority = 2, /* Higher priority than IP */
		.queue = l1->sctp_local_q,
	};

	return rte_eth_dev_filter_ctrl(l1->pmd_id,
		RTE_ETH_FILTER_NTUPLE,
		RTE_ETH_FILTER_DELETE,
		&filter);
}

/* Install the link's ARP filter if an ARP queue is configured; panic on failure. */
static void
app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->arp_q != 0) {
		int status = app_link_filter_arp_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding ARP filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->arp_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding ARP filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->arp_q, status);
	}
}

/* Install the link's TCP SYN filter if a SYN queue is configured; panic on failure. */
static void
app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
{
	if (cp->tcp_syn_q != 0) {
		int status = app_link_filter_tcp_syn_add(cp);

		APP_LOG(app, LOW, "%s (%" PRIu32 "): "
			"Adding TCP SYN filter (queue = %" PRIu32 ")",
			cp->name, cp->pmd_id, cp->tcp_syn_q);

		if (status)
			rte_panic("%s (%" PRIu32 "): "
				"Error adding TCP SYN filter "
				"(queue = %" PRIu32 ") (%" PRId32 ")\n",
				cp->name, cp->pmd_id, cp->tcp_syn_q,
				status);
	}
}

/*
 * Bring link cp administratively UP: program IP/TCP/UDP/SCTP filters for
 * cp's IP address on every link that has the corresponding local queue,
 * then set the PMD link up and mark the software state UP.
 * Panics on any filter programming error.
 */
void
app_link_up_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* For each link, add filters for IP of current link */
	if (cp->ip != 0) {
		for (i = 0; i < app->n_links; i++) {
			struct app_link_params *p = &app->link_params[i];

			/* IP */
			if (p->ip_local_q != 0) {
				/* NOTE: inner status shadows the outer one */
				int status = app_link_filter_ip_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding IP filter (queue= %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding IP "
						"filter (queue= %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->ip_local_q, cp->ip, status);
			}

			/* TCP */
			if (p->tcp_local_q != 0) {
				int status = app_link_filter_tcp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding TCP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->tcp_local_q, cp->ip, status);
			}

			/* UDP */
			if (p->udp_local_q != 0) {
				int status = app_link_filter_udp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32 "): "
					"Adding UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding UDP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->udp_local_q, cp->ip, status);
			}

			/* SCTP */
			if (p->sctp_local_q != 0) {
				int status =
					app_link_filter_sctp_add(p, cp);

				APP_LOG(app, LOW, "%s (%" PRIu32
					"): Adding SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%08" PRIx32 ")",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip);

				if (status)
					rte_panic("%s (%" PRIu32 "): "
						"Error adding SCTP "
						"filter (queue = %" PRIu32 ", "
						"IP = 0x%08" PRIx32
						") (%" PRId32 ")\n",
						p->name, p->pmd_id,
						p->sctp_local_q, cp->ip,
						status);
			}
		}
	}

	/* PMD link up */
	status = rte_eth_dev_set_link_up(cp->pmd_id);
	/* Do not panic if PMD does not provide link up functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link up error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as UP */
	cp->state = 1;
}

/*
 * Bring link cp administratively DOWN: set the PMD link down, mark the
 * software state DOWN, then remove the IP/TCP/UDP/SCTP filters that
 * app_link_up_internal() installed for cp's IP on every link.
 * Panics on any filter removal error.
 */
void
app_link_down_internal(struct app_params *app, struct app_link_params *cp)
{
	uint32_t i;
	int status;

	/* PMD link down */
	status = rte_eth_dev_set_link_down(cp->pmd_id);
	/* Do not panic if PMD does not provide link down functionality */
	if (status < 0 && status != -ENOTSUP)
		rte_panic("%s (%" PRIu32 "): PMD set link down error %"
			PRId32 "\n", cp->name, cp->pmd_id, status);

	/* Mark link as DOWN */
	cp->state = 0;

	/* Return if current link IP is not valid */
	if (cp->ip == 0)
		return;

	/* For each link, remove filters for IP of current link */
	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];

		/* IP */
		if (p->ip_local_q != 0) {
			int status = app_link_filter_ip_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting IP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->ip_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting IP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->ip_local_q,
					cp->ip, status);
		}

		/* TCP */
		if (p->tcp_local_q != 0) {
			int status = app_link_filter_tcp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting TCP filter "
				"(queue = %" PRIu32
				", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->tcp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting TCP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->tcp_local_q,
					cp->ip, status);
		}

		/* UDP */
		if (p->udp_local_q != 0) {
			int status = app_link_filter_udp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting UDP filter "
				"(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->udp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting UDP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->udp_local_q,
					cp->ip, status);
		}

		/* SCTP */
		if (p->sctp_local_q != 0) {
			int status = app_link_filter_sctp_del(p, cp);

			APP_LOG(app, LOW,
				"%s (%" PRIu32 "): Deleting SCTP filter "
				"(queue = %" PRIu32
				", IP = 0x%" PRIx32 ")",
				p->name, p->pmd_id, p->sctp_local_q, cp->ip);

			if (status)
				rte_panic("%s (%" PRIu32
					"): Error deleting SCTP filter "
					"(queue = %" PRIu32
					", IP = 0x%" PRIx32
					") (%" PRId32 ")\n",
					p->name, p->pmd_id, p->sctp_local_q,
					cp->ip, status);
		}
	}
}

/* Log every link's speed/status and panic if any link is DOWN. */
static void
app_check_link(struct app_params *app)
{
	uint32_t all_links_up, i;

	all_links_up = 1;

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p = &app->link_params[i];
		struct rte_eth_link link_params;

		memset(&link_params, 0, sizeof(link_params));
		rte_eth_link_get(p->pmd_id, &link_params);

		APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
			p->name,
			p->pmd_id,
			link_params.link_speed / 1000,
			link_params.link_status ? "UP" : "DOWN");

		if (link_params.link_status == ETH_LINK_DOWN)
			all_links_up = 0;
	}

	if (all_links_up == 0)
		rte_panic("Some links are DOWN\n");
}

/* Return 1 if any SWQ has IPv4/IPv6 fragmentation or reassembly enabled. */
static uint32_t
is_any_swq_frag_or_ras(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];

		if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
			(p->ipv4_ras == 1) || (p->ipv6_ras == 1))
			return 1;
	}

	return 0;
}

/*
 * When fragmentation/reassembly is in use, TX must accept multi-segment
 * mbufs: clear the NOMULTSEGS flag on every HW output queue.
 */
static void
app_init_link_frag_ras(struct app_params *app)
{
	uint32_t i;

	if (is_any_swq_frag_or_ras(app)) {
		for (i = 0; i < app->n_pktq_hwq_out; i++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[i];

			p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
		}
	}
}

/* NUMA socket of the port's device, defaulting to socket 0 when unknown. */
static inline int
app_get_cpu_socket_id(uint32_t pmd_id)
{
	int status = rte_eth_dev_socket_id(pmd_id);

	return (status != SOCKET_ID_ANY) ? status : 0;
}

/* Non-zero when the link has RSS queues configured. */
static inline int
app_link_rss_enabled(struct app_link_params *cp)
{
	return (cp->n_rss_qs) ? 1 : 0;
}

/*
 * Program the RSS redirection table (RETA) of link cp, spreading the
 * entries round-robin across cp's configured RSS queues.
 * Panics on an invalid RETA size or a failed update.
 */
static void
app_link_rss_setup(struct app_link_params *cp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
	uint32_t i;
	int status;

	/* Get RETA size */
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(cp->pmd_id, &dev_info);

	if (dev_info.reta_size == 0)
		rte_panic("%s (%u): RSS setup error (null RETA size)\n",
			cp->name, cp->pmd_id);

	if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
		rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
			cp->name, cp->pmd_id);

	/* Setup RETA contents */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < dev_info.reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < dev_info.reta_size; i++) {
		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % cp->n_rss_qs;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) cp->rss_qs[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(cp->pmd_id,
		reta_conf,
		dev_info.reta_size);
	if (status != 0)
		rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
			cp->name, cp->pmd_id);
}

/* Enable RSS receive mode on the link config when RSS queues are present. */
static void
app_init_link_set_config(struct app_link_params *p)
{
	if (p->n_rss_qs) {
		p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
			p->rss_proto_ipv6 |
			p->rss_proto_l2;
	}
}

/*
 * Initialize all links: configure the device, set up every RX and TX
 * queue belonging to the link, start the device, install classification
 * filters, set up RSS and bring the link up. Panics on any error, then
 * verifies all links are up.
 */
static void
app_init_link(struct app_params *app)
{
	uint32_t i;

	app_init_link_frag_ras(app);

	for (i = 0; i < app->n_links; i++) {
		struct app_link_params *p_link = &app->link_params[i];
		uint32_t link_id, n_hwq_in, n_hwq_out, j;
		int status;

		/* Link index is encoded in the name, e.g. "LINK3" */
		sscanf(p_link->name, "LINK%" PRIu32, &link_id);
		n_hwq_in = app_link_get_n_rxq(app, p_link);
		n_hwq_out = app_link_get_n_txq(app, p_link);
		app_init_link_set_config(p_link);

		APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
			"(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
			p_link->name,
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out);

		/* LINK */
		status = rte_eth_dev_configure(
			p_link->pmd_id,
			n_hwq_in,
			n_hwq_out,
			&p_link->conf);
		if (status < 0)
			rte_panic("%s (%" PRId32 "): "
				"init error (%" PRId32 ")\n",
				p_link->name, p_link->pmd_id, status);

		rte_eth_macaddr_get(p_link->pmd_id,
			(struct ether_addr *) &p_link->mac_addr);

		if (p_link->promisc)
			rte_eth_promiscuous_enable(p_link->pmd_id);

		/* RXQ */
		for (j = 0; j < app->n_pktq_hwq_in; j++) {
			struct app_pktq_hwq_in_params *p_rxq =
				&app->hwq_in_params[j];
			uint32_t rxq_link_id, rxq_queue_id;
			uint16_t nb_rxd = p_rxq->size;

			/* Queue name encodes link and queue: "RXQ1.0" */
			sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
				&rxq_link_id,
				&rxq_queue_id);
			if (rxq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				&nb_rxd,
				NULL);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Rx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);

			status = rte_eth_rx_queue_setup(
				p_link->pmd_id,
				rxq_queue_id,
				nb_rxd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_rxq->conf,
				app->mempool[p_rxq->mempool_id]);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_rxq->name,
					status);
		}

		/* TXQ */
		for (j = 0; j < app->n_pktq_hwq_out; j++) {
			struct app_pktq_hwq_out_params *p_txq =
				&app->hwq_out_params[j];
			uint32_t txq_link_id, txq_queue_id;
			uint16_t nb_txd = p_txq->size;

			/* Queue name encodes link and queue: "TXQ1.0" */
			sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
				&txq_link_id,
				&txq_queue_id);
			if (txq_link_id != link_id)
				continue;

			status = rte_eth_dev_adjust_nb_rx_tx_desc(
				p_link->pmd_id,
				NULL,
				&nb_txd);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s adjust number of Tx descriptors "
					"error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);

			status = rte_eth_tx_queue_setup(
				p_link->pmd_id,
				txq_queue_id,
				nb_txd,
				app_get_cpu_socket_id(p_link->pmd_id),
				&p_txq->conf);
			if (status < 0)
				rte_panic("%s (%" PRIu32 "): "
					"%s init error (%" PRId32 ")\n",
					p_link->name,
					p_link->pmd_id,
					p_txq->name,
					status);
		}

		/* LINK START */
		status = rte_eth_dev_start(p_link->pmd_id);
		if (status < 0)
			rte_panic("Cannot start %s (error %" PRId32 ")\n",
				p_link->name, status);

		/* LINK FILTERS */
		app_link_set_arp_filter(app, p_link);
		app_link_set_tcp_syn_filter(app, p_link);

		if (app_link_rss_enabled(p_link))
			app_link_rss_setup(p_link);

		/* LINK UP */
		app_link_up_internal(app, p_link);
	}

	app_check_link(app);
}

/*
 * Create one rte_ring per software queue, enabling single-consumer /
 * single-producer fast paths when only one reader/writer exists.
 */
static void
app_init_swq(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_swq; i++) {
		struct app_pktq_swq_params *p = &app->swq_params[i];
		unsigned flags = 0;

		if (app_swq_get_readers(app, p) == 1)
			flags |= RING_F_SC_DEQ;
		if (app_swq_get_writers(app, p) == 1)
			flags |= RING_F_SP_ENQ;

		APP_LOG(app, HIGH, "Initializing %s...", p->name);
		app->swq[i] = rte_ring_create(
			p->name,
			p->size,
			p->cpu_socket_id,
			flags);

		if (app->swq[i] == NULL)
			rte_panic("%s init error\n", p->name);
	}
}

/*
 * Configure the hierarchical scheduler (traffic manager) for each TM
 * queue: derive the port rate from the underlying link speed, then set
 * up every subport and each pipe with a configured profile.
 */
static void
app_init_tm(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tm; i++) {
		struct app_pktq_tm_params *p_tm = &app->tm_params[i];
		struct app_link_params *p_link;
		struct rte_eth_link link_eth_params;
		struct rte_sched_port *sched;
		uint32_t n_subports, subport_id;
		int status;

		p_link = app_get_link_for_tm(app, p_tm);
		/* LINK */
		rte_eth_link_get(p_link->pmd_id, &link_eth_params);

		/* TM */
		p_tm->sched_port_params.name = p_tm->name;
		p_tm->sched_port_params.socket =
			app_get_cpu_socket_id(p_link->pmd_id);
		/* link_speed is in Mbps; rate is in bytes/second */
		p_tm->sched_port_params.rate =
			(uint64_t) link_eth_params.link_speed
			* 1000 * 1000 / 8;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
		sched = rte_sched_port_config(&p_tm->sched_port_params);
		if (sched == NULL)
			rte_panic("%s init error\n", p_tm->name);
		app->tm[i] = sched;

		/* Subport */
		n_subports = p_tm->sched_port_params.n_subports_per_port;
		for (subport_id = 0; subport_id < n_subports; subport_id++) {
			uint32_t n_pipes_per_subport, pipe_id;

			status = rte_sched_subport_config(sched,
				subport_id,
				&p_tm->sched_subport_params[subport_id]);
			if (status)
				rte_panic("%s subport %" PRIu32
					" init error (%" PRId32 ")\n",
					p_tm->name, subport_id, status);

			/* Pipe */
			n_pipes_per_subport =
				p_tm->sched_port_params.n_pipes_per_subport;
			for (pipe_id = 0;
				pipe_id < n_pipes_per_subport;
				pipe_id++) {
				int profile_id = p_tm->sched_pipe_to_profile[
					subport_id * APP_MAX_SCHED_PIPES +
					pipe_id];

				/* -1 marks an unconfigured pipe */
				if (profile_id == -1)
					continue;

				status = rte_sched_pipe_config(sched,
					subport_id,
					pipe_id,
					profile_id);
				if (status)
					rte_panic("%s subport %" PRIu32
						" pipe %" PRIu32
						" (profile %" PRId32 ") "
						"init error (% " PRId32
						")\n",
						p_tm->name, subport_id,
						pipe_id, profile_id,
						status);
			}
		}
	}
}

#ifndef RTE_EXEC_ENV_LINUXAPP
/* TAP devices require a Linux execution environment. */
static void
app_init_tap(struct app_params *app)
{
	if (app->n_pktq_tap == 0)
		return;

	rte_panic("TAP device not supported.\n");
}
#else
/*
 * Open and configure one Linux TAP interface per configured TAP queue,
 * storing the resulting file descriptor in app->tap[].
 */
static void
app_init_tap(struct app_params *app)
{
	uint32_t i;

	for (i = 0; i < app->n_pktq_tap; i++) {
		struct app_pktq_tap_params *p_tap = &app->tap_params[i];
		struct ifreq ifr;
		int fd, status;

		APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);

		fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
		if (fd < 0)
			rte_panic("Cannot open file /dev/net/tun\n");

		memset(&ifr, 0, sizeof(ifr));
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
		snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);

		status = ioctl(fd, TUNSETIFF, (void *) &ifr);
		if (status < 0)
			rte_panic("TAP setup error\n");

		app->tap[i] = fd;
	}
}
/* * This function learns the MAC address of the device and set init * L2 header and L3 header info. */ int vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m) { int i, ret; struct ether_hdr *pkt_hdr; struct virtio_net *dev = vdev->dev; uint64_t portid = dev->device_fh; struct ipv4_hdr *ip; struct rte_eth_tunnel_filter_conf tunnel_filter_conf; if (unlikely(portid > VXLAN_N_PORTS)) { RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: Not configuring device," "as already have %d ports for VXLAN.", dev->device_fh, VXLAN_N_PORTS); return -1; } /* Learn MAC address of guest device from packet */ pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); if (is_same_ether_addr(&(pkt_hdr->s_addr), &vdev->mac_address)) { RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing" " MAC address and has not been registered.\n", dev->device_fh); return -1; } for (i = 0; i < ETHER_ADDR_LEN; i++) { vdev->mac_address.addr_bytes[i] = vxdev.port[portid].vport_mac.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i]; vxdev.port[portid].peer_mac.addr_bytes[i] = peer_mac[i]; } memset(&tunnel_filter_conf, 0, sizeof(struct rte_eth_tunnel_filter_conf)); ether_addr_copy(&ports_eth_addr[0], &tunnel_filter_conf.outer_mac); tunnel_filter_conf.filter_type = tep_filter_type[filter_idx]; /* inner MAC */ ether_addr_copy(&vdev->mac_address, &tunnel_filter_conf.inner_mac); tunnel_filter_conf.queue_id = vdev->rx_q; tunnel_filter_conf.tenant_id = tenant_id_conf[vdev->rx_q]; if (tep_filter_type[filter_idx] == RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID) tunnel_filter_conf.inner_vlan = INNER_VLAN_ID; tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN; ret = rte_eth_dev_filter_ctrl(ports[0], RTE_ETH_FILTER_TUNNEL, RTE_ETH_FILTER_ADD, &tunnel_filter_conf); if (ret) { RTE_LOG(ERR, VHOST_DATA, "%d Failed to add device MAC address to cloud filter\n", vdev->rx_q); return -1; } /* Print out inner MAC and VNI info. 
*/ RTE_LOG(INFO, VHOST_DATA, "(%d) MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VNI %d registered\n", vdev->rx_q, vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1], vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3], vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5], tenant_id_conf[vdev->rx_q]); vxdev.port[portid].vport_id = portid; for (i = 0; i < 4; i++) { /* Local VTEP IP */ vxdev.port_ip |= vxlan_multicast_ips[portid][i] << (8 * i); /* Remote VTEP IP */ vxdev.port[portid].peer_ip |= vxlan_overlay_ips[portid][i] << (8 * i); } vxdev.out_key = tenant_id_conf[vdev->rx_q]; ether_addr_copy(&vxdev.port[portid].peer_mac, &app_l2_hdr[portid].d_addr); ether_addr_copy(&ports_eth_addr[0], &app_l2_hdr[portid].s_addr); app_l2_hdr[portid].ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); ip = &app_ip_hdr[portid]; ip->version_ihl = IP_VHL_DEF; ip->type_of_service = 0; ip->total_length = 0; ip->packet_id = 0; ip->fragment_offset = IP_DN_FRAGMENT_FLAG; ip->time_to_live = IP_DEFTTL; ip->next_proto_id = IPPROTO_UDP; ip->hdr_checksum = 0; ip->src_addr = vxdev.port_ip; ip->dst_addr = vxdev.port[portid].peer_ip; /* Set device as ready for RX. */ vdev->ready = DEVICE_RX; return 0; }
static inline int ntuple_filter_add(uint8_t port, const char *addr, uint8_t queue_id) { int ret = 0; uint32_t ntuple_ip_addr; ret = inet_pton(AF_INET, addr, &ntuple_ip_addr); if (ret <= 0) { if (ret == 0) { printf("Error: %s is not in presentation format\n", addr); } else if (ret == -1) { perror("inet_pton"); } return ret; } struct rte_eth_ntuple_filter filter = { .flags = RTE_5TUPLE_FLAGS, .dst_ip = ntuple_ip_addr, /* Big endian */ .dst_ip_mask = UINT32_MAX, /* Enable */ .src_ip = 0, .src_ip_mask = 0, /* Disable */ .dst_port = 0, .dst_port_mask = 0, /* Disable */ .src_port = 0, .src_port_mask = 0, /* Disable */ .proto = 0, .proto_mask = 0, /* Disable */ .tcp_flags = 0, .priority = 1, /* Lowest */ .queue = queue_id, }; return rte_eth_dev_filter_ctrl(port, RTE_ETH_FILTER_NTUPLE, RTE_ETH_FILTER_ADD, &filter); } /* * Initialises a given port using global settings and with the rx buffers * coming from the mbuf_pool passed as parameter */ static inline int port_init(uint8_t port, struct rte_mempool *mbuf_pool) { port_conf_default.fdir_conf = fdir_conf; struct rte_eth_conf port_conf = port_conf_default; const uint16_t rx_rings = 1, tx_rings = 1; int retval; uint16_t q; if (port >= rte_eth_dev_count()) return -1; port_conf.rxmode.hw_vlan_strip = 0; retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) return retval; for (q = 0; q < rx_rings; q++) { retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE, rte_eth_dev_socket_id(port), NULL, mbuf_pool); if (retval < 0) return retval; } for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, rte_eth_dev_socket_id(port), NULL); if (retval < 0) return retval; } retval = rte_eth_dev_start(port); if (retval < 0) return retval; struct ether_addr addr; rte_eth_macaddr_get(port, &addr); printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n", (unsigned)port, addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], addr.addr_bytes[3], 
addr.addr_bytes[4], addr.addr_bytes[5]); rte_eth_promiscuous_enable(port); return 0; }
static inline void fdir_filter_add(uint8_t port_id, const char *addr, enum rte_eth_fdir_behavior behavior, uint32_t soft_id) { struct rte_eth_fdir_filter entry; uint32_t fdir_ip_addr; int ret = 0; ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); if (ret < 0) { printf("flow director is not supported on port %u.\n", port_id); return; } memset(&entry, 0, sizeof(struct rte_eth_fdir_filter)); ret = inet_pton(AF_INET, addr, &fdir_ip_addr); if (ret <= 0) { if (ret == 0) { printf("Error: %s is not in presentation format\n", addr); return; } else if (ret == -1) { perror("inet_pton"); return; } } //printf("%d\n", behavior); //printf("%s, %u\n", addr, fdir_ip_addr); entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP; //entry.input.flow_type = RTE_ETH_FLOW_IPV4; entry.input.flow.ip4_flow.dst_ip = fdir_ip_addr; //entry.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(TCP_PORT); //entry.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(TCP_PORT); entry.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(0); entry.input.flow_ext.is_vf = 0; entry.action.behavior = behavior; entry.action.flex_off = 0; entry.action.report_status = RTE_ETH_FDIR_REPORT_ID; if (behavior == RTE_ETH_FDIR_ACCEPT) entry.action.rx_queue = PKT_ACCEPT_QUEUE; else entry.action.rx_queue = PKT_DROP_QUEUE; entry.soft_id = soft_id; ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &entry); if (ret < 0) printf("flow director programming error: (%s)\n", strerror(-ret)); entry.soft_id = soft_id + 100; entry.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(0x1); ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &entry); if (ret < 0) printf("flow director programming error: (%s)\n", strerror(-ret)); }