static int dpdk_main(int port_id, int argc, char* argv[]) { struct rte_eth_dev_info dev_info; unsigned nb_queues; FILE* lfile; uint8_t core_id; int ret; printf("In dpdk_main\n"); // Open the log file lfile = fopen("./vrouter.log", "w"); // Program the rte log rte_openlog_stream(lfile); ret = rte_eal_init(argc, argv); if (ret < 0) { log_crit( "Invalid EAL parameters\n"); return -1; } log_info( "Programming cmd rings now!\n"); rx_event_fd = (int *) malloc(sizeof(int *) * rte_lcore_count()); if (!rx_event_fd) { log_crit("Failed to allocate memory for rx event fd arrays\n"); return -ENOMEM; } rte_eth_macaddr_get(port_id, &port_eth_addr); log_info("Port%d: MAC Address: ", port_id); print_ethaddr(&port_eth_addr); /* Determine the number of RX/TX pairs supported by NIC */ rte_eth_dev_info_get(port_id, &dev_info); dev_info.pci_dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX; dev_info.pci_dev->intr_handle.max_intr = dev_info.max_rx_queues + dev_info.max_tx_queues; ret = rte_intr_efd_enable(&dev_info.pci_dev->intr_handle, dev_info.max_rx_queues); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to enable rx interrupts\n"); } ret = rte_intr_enable(&dev_info.pci_dev->intr_handle); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to enable interrupts\n"); } ret = rte_eth_dev_configure(port_id, dev_info.max_rx_queues, dev_info.max_tx_queues, &port_conf); if (ret < 0) { rte_exit(EXIT_FAILURE, "Failed to configure ethernet device\n"); } /* For each RX/TX pair */ nb_queues = dev_info.max_tx_queues; for (core_id = 0; core_id < nb_queues; core_id++) { char s[64]; if (rte_lcore_is_enabled(core_id) == 0) continue; /* NUMA socket number */ unsigned socketid = rte_lcore_to_socket_id(core_id); if (socketid >= NB_SOCKETS) { log_crit( "Socket %d of lcore %u is out of range %d\n", socketid, core_id, NB_SOCKETS); return -EBADF; } /* Create memory pool */ if (pktmbuf_pool[socketid] == NULL) { log_info("Creating mempool on %d of ~%lx bytes\n", socketid, NB_MBUF * MBUF_SIZE); printf("Creating 
mempool on %d of ~%lx bytes\n", socketid, NB_MBUF * MBUF_SIZE); snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); pktmbuf_pool[socketid] = rte_mempool_create(s, NB_MBUF, MBUF_SIZE, MEMPOOL_CACHE_SIZE, PKTMBUF_PRIV_SZ, rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, socketid, 0); if (!pktmbuf_pool[socketid]) { log_crit( "Cannot init mbuf pool on socket %d\n", socketid); return -ENOMEM; } } /* Setup the TX queue */ ret = rte_eth_tx_queue_setup(port_id, core_id, RTE_TX_DESC_DEFAULT, socketid, &tx_conf); if (ret < 0) { log_crit( "Cannot initialize TX queue (%d)\n", core_id); return -ENODEV; } /* Setup the RX queue */ ret = rte_eth_rx_queue_setup(port_id, core_id, RTE_RX_DESC_DEFAULT, socketid, &rx_conf, pktmbuf_pool[socketid]); if (ret < 0) { log_crit( "Cannot initialize RX queue (%d)\n", core_id); return -ENODEV; } /* Create the event fds for event notification */ lcore_cmd_event_fd[core_id] = eventfd(0, 0); } // Start the eth device ret = rte_eth_dev_start(port_id); if (ret < 0) { log_crit( "rte_eth_dev_start: err=%d, port=%d\n", ret, core_id); return -ENODEV; } // Put the device in promiscuous mode rte_eth_promiscuous_enable(port_id); // Wait for link up //check_all_ports_link_status(1, 1u << port_id); log_info( "Starting engines on every core\n"); rte_eal_mp_remote_launch(engine_loop, &dev_info, CALL_MASTER); return 0; }
/*
 * Configure Rx-queue interrupt vectors for an AVF (Intel Adaptive VF) port.
 *
 * Chooses between three mapping strategies:
 *   1. Rx interrupts disabled: map every queue to a single vector, used
 *      only for descriptor write-back (via WB_ON_ITR if the VF offers it,
 *      otherwise by programming the admin vector's ITR to its maximum).
 *   2. Rx interrupts enabled but only one vector available: map all queues
 *      to the miscellaneous vector (AVF_MISC_VEC_ID).
 *   3. Rx interrupts enabled with multiple vectors: distribute queues
 *      round-robin over vectors starting at AVF_RX_VEC_START.
 * Finally pushes the resulting queue->vector map to the PF.
 *
 * Returns 0 on success, -1 on allocation or mapping failure.
 */
static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
                                     struct rte_intr_handle *intr_handle)
{
    struct avf_adapter *adapter =
        AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
    struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
    struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
    uint16_t interval, i;
    int vec;

    /* Create one event fd per Rx queue when the platform supports
     * multiple interrupt vectors and Rx interrupts were requested. */
    if (rte_intr_cap_multiple(intr_handle) &&
        dev->data->dev_conf.intr_conf.rxq) {
        if (rte_intr_efd_enable(intr_handle, dev->data->nb_rx_queues))
            return -1;
    }

    /* Lazily allocate the queue->vector table used by the datapath. */
    if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
        intr_handle->intr_vec =
            rte_zmalloc("intr_vec",
                        dev->data->nb_rx_queues * sizeof(int), 0);
        if (!intr_handle->intr_vec) {
            PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
                        dev->data->nb_rx_queues);
            return -1;
        }
    }

    if (!dev->data->dev_conf.intr_conf.rxq ||
        !rte_intr_dp_is_en(intr_handle)) {
        /* Rx interrupt disabled, Map interrupt only for writeback */
        vf->nb_msix = 1;
        if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
            /* If WB_ON_ITR supports, enable it */
            vf->msix_base = AVF_RX_VEC_START;
            /* NOTE(review): register index is msix_base - 1 —
             * AVFINT_DYN_CTLN1 appears to be 0-based relative to the
             * first queue vector; confirm against the datasheet. */
            AVF_WRITE_REG(hw, AVFINT_DYN_CTLN1(vf->msix_base - 1),
                          AVFINT_DYN_CTLN1_ITR_INDX_MASK |
                          AVFINT_DYN_CTLN1_WB_ON_ITR_MASK);
        } else {
            /* If no WB_ON_ITR offload flags, need to set
             * interrupt for descriptor write back.
             */
            vf->msix_base = AVF_MISC_VEC_ID;

            /* set ITR to max */
            interval = avf_calc_itr_interval(AVF_QUEUE_ITR_INTERVAL_MAX);
            AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
                          AVFINT_DYN_CTL01_INTENA_MASK |
                          (AVF_ITR_INDEX_DEFAULT <<
                           AVFINT_DYN_CTL01_ITR_INDX_SHIFT) |
                          (interval <<
                           AVFINT_DYN_CTL01_INTERVAL_SHIFT));
        }
        AVF_WRITE_FLUSH(hw);
        /* map all queues to the same interrupt */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
            vf->rxq_map[0] |= 1 << i;
    } else {
        if (!rte_intr_allow_others(intr_handle)) {
            /* Only one vector usable: share the misc vector with all
             * Rx queues. */
            vf->nb_msix = 1;
            vf->msix_base = AVF_MISC_VEC_ID;
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vf->rxq_map[0] |= 1 << i;
                intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
            }
            PMD_DRV_LOG(DEBUG,
                        "vector 0 are mapping to all Rx queues");
        } else {
            /* If Rx interrupt is required, and we can use
             * multi interrupts, then the vec is from 1
             */
            vf->nb_msix = RTE_MIN(vf->vf_res->max_vectors,
                                  intr_handle->nb_efd);
            vf->msix_base = AVF_RX_VEC_START;
            vec = AVF_RX_VEC_START;
            /* Round-robin queues over vectors [AVF_RX_VEC_START,
             * nb_msix); wrap back to the start when exhausted. */
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vf->rxq_map[vec] |= 1 << i;
                intr_handle->intr_vec[i] = vec++;
                if (vec >= vf->nb_msix)
                    vec = AVF_RX_VEC_START;
            }
            PMD_DRV_LOG(DEBUG,
                        "%u vectors are mapping to %u Rx queues",
                        vf->nb_msix, dev->data->nb_rx_queues);
        }
    }

    /* Send the queue->vector map to the PF via virtchnl. */
    if (avf_config_irq_map(adapter)) {
        PMD_DRV_LOG(ERR, "config interrupt mapping failed");
        return -1;
    }

    return 0;
}