/*
 * Dump the effective perf-test configuration: producer/worker lcore
 * counts and masks, stage count, eventdev port/queue totals, queue
 * priority, scheduling type list and producer type.
 * Output order is part of the expected report format.
 */
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}
/*
 * Set up all eventdev ports for the perf test.
 *
 * Ports [0, nb_workers) are worker ports: each is configured with
 * @port_conf and linked to all queues. Ports [nb_workers,
 * perf_nb_event_ports(opt)) are producer ports: with the ethdev Rx
 * adapter they are handed to perf_event_rx_adapter_setup(), otherwise
 * each synthetic producer port is configured here and assigned a start
 * queue of prod * stride.
 *
 * @param test      Test instance whose private data holds worker/prod arrays.
 * @param opt       Parsed command-line options (dev_id, lcore masks, ...).
 * @param stride    Queue stride between consecutive producers.
 * @param nb_queues Total number of event queues; each worker port must
 *                  link to exactly this many queues.
 * @param port_conf Port configuration applied to every port set up here.
 * @return 0 on success, negative errno-style value on failure.
 */
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t stride, uint8_t nb_queues,
		const struct rte_event_port_conf *port_conf)
{
	struct test_perf *t = evt_test_priv(test);
	uint16_t port, prod;
	int ret = -1;

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, port_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		/* NULL queue list links to all queues; the call returns the
		 * number of queues actually linked, so anything other than
		 * nb_queues is a partial link and treated as failure.
		 */
		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];
			/* Only the back-pointer is filled here; presumably
			 * dev_id/port_id/queue_id are completed by
			 * perf_event_rx_adapter_setup() — confirm against
			 * that helper.
			 */
			p->t = t;
		}

		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
		if (ret)
			return ret;
	} else {
		prod = 0;
		for ( ; port < perf_nb_event_ports(opt); port++) {
			struct prod_data *p = &t->prod[port];

			p->dev_id = opt->dev_id;
			p->port_id = port;
			/* producer 'prod' injects into queue prod * stride */
			p->queue_id = prod * stride;
			p->t = t;

			ret = rte_event_port_setup(opt->dev_id, port,
					port_conf);
			if (ret) {
				evt_err("failed to setup port %d", port);
				return ret;
			}
			prod++;
		}
	}

	return ret;
}
/*
 * Validate the perf-test options and apply fixups.
 *
 * Checks the minimum lcore count, worker/producer lcore masks (overlap
 * with the master lcore and with each other, disabled cores, at least
 * one active core), stage and sched-type lists, and queue/port limits.
 * Fixups: disables fwd_latency for single-stage runs, forces queue
 * priority when measuring forward latency, and defaults nb_pkts.
 *
 * The order of the checks (and therefore of the error messages) is part
 * of the user-visible behavior; do not reorder.
 *
 * @param opt       Options to validate (may be modified by fixups).
 * @param nb_queues Number of event queues the test will create.
 * @return 0 when the options are usable, -1 otherwise.
 */
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + 1 master when producer cores are used
	 * Else N worker + 1 master when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlaps with master lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_master_lcore())) {
			evt_err("producer lcores overlaps with master lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		/* latency is measured across stages, meaningless with one */
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}

	/* NOTE(review): with the Rx adapter plcores may have zero active
	 * lcores, which would divide by zero here — confirm plcores is
	 * always non-empty on this path.
	 */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}
static int perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt) { int ret; uint8_t queue; uint8_t nb_queues; uint8_t nb_ports; struct rte_event_dev_info dev_info; nb_ports = evt_nr_active_lcores(opt->wlcores); nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR || opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 : evt_nr_active_lcores(opt->plcores); nb_queues = atq_nb_event_queues(opt); memset(&dev_info, 0, sizeof(struct rte_event_dev_info)); ret = rte_event_dev_info_get(opt->dev_id, &dev_info); if (ret) { evt_err("failed to get eventdev info %d", opt->dev_id); return ret; } const struct rte_event_dev_config config = { .nb_event_queues = nb_queues, .nb_event_ports = nb_ports, .nb_events_limit = dev_info.max_num_events, .nb_event_queue_flows = opt->nb_flows, .nb_event_port_dequeue_depth = dev_info.max_event_port_dequeue_depth, .nb_event_port_enqueue_depth = dev_info.max_event_port_enqueue_depth, }; ret = rte_event_dev_configure(opt->dev_id, &config); if (ret) { evt_err("failed to configure eventdev %d", opt->dev_id); return ret; } struct rte_event_queue_conf q_conf = { .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES, .nb_atomic_flows = opt->nb_flows, .nb_atomic_order_sequences = opt->nb_flows, }; /* queue configurations */ for (queue = 0; queue < nb_queues; queue++) { ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf); if (ret) { evt_err("failed to setup queue=%d", queue); return ret; } } if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth) opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth; /* port configuration */ const struct rte_event_port_conf p_conf = { .dequeue_depth = opt->wkr_deq_dep, .enqueue_depth = dev_info.max_event_port_dequeue_depth, .new_event_threshold = dev_info.max_num_events, }; ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues, &p_conf); if (ret) return ret; if (!evt_has_distributed_sched(opt->dev_id)) { uint32_t service_id; 
rte_event_dev_service_id_get(opt->dev_id, &service_id); ret = evt_service_setup(service_id); if (ret) { evt_err("No service lcore found to run event dev."); return ret; } } ret = rte_event_dev_start(opt->dev_id); if (ret) { evt_err("failed to start eventdev %d", opt->dev_id); return ret; } return 0; } static void perf_atq_opt_dump(struct evt_options *opt) { perf_opt_dump(opt, atq_nb_event_queues(opt)); } static int perf_atq_opt_check(struct evt_options *opt) { return perf_opt_check(opt, atq_nb_event_queues(opt)); } static bool perf_atq_capability_check(struct evt_options *opt) { struct rte_event_dev_info dev_info; rte_event_dev_info_get(opt->dev_id, &dev_info); if (dev_info.max_event_queues < atq_nb_event_queues(opt) || dev_info.max_event_ports < perf_nb_event_ports(opt)) { evt_err("not enough eventdev queues=%d/%d or ports=%d/%d", atq_nb_event_queues(opt), dev_info.max_event_queues, perf_nb_event_ports(opt), dev_info.max_event_ports); } if (!evt_has_all_types_queue(opt->dev_id)) return false; return true; } static const struct evt_test_ops perf_atq = { .cap_check = perf_atq_capability_check, .opt_check = perf_atq_opt_check, .opt_dump = perf_atq_opt_dump, .test_setup = perf_test_setup, .ethdev_setup = perf_ethdev_setup, .mempool_setup = perf_mempool_setup, .eventdev_setup = perf_atq_eventdev_setup, .launch_lcores = perf_atq_launch_lcores, .eventdev_destroy = perf_eventdev_destroy, .mempool_destroy = perf_mempool_destroy, .ethdev_destroy = perf_ethdev_destroy, .test_result = perf_test_result, .test_destroy = perf_test_destroy, }; EVT_TEST_REGISTER(perf_atq);