/* * Run the lthread scheduler * This loop is the heart of the system */ void lthread_run(void) { struct lthread_sched *sched = THIS_SCHED; struct lthread *lt = NULL; RTE_LOG(INFO, LTHREAD, "starting scheduler %p on lcore %u phys core %u\n", sched, rte_lcore_id(), rte_lcore_index(rte_lcore_id())); /* if more than one, wait for all schedulers to start */ _lthread_schedulers_sync_start(); /* * This is the main scheduling loop * So long as there are tasks in existence we run this loop. * We check for:- * expired timers, * the local ready queue, * and the peer ready queue, * * and resume lthreads ad infinitum. */ while (!_lthread_sched_isdone(sched)) { rte_timer_manage(); lt = _lthread_queue_poll(sched->ready); if (lt != NULL) _lthread_resume(lt); lt = _lthread_queue_poll(sched->pready); if (lt != NULL) _lthread_resume(lt); } /* if more than one wait for all schedulers to stop */ _lthread_schedulers_sync_stop(); (THIS_SCHED) = NULL; RTE_LOG(INFO, LTHREAD, "stopping scheduler %p on lcore %u phys core %u\n", sched, rte_lcore_id(), rte_lcore_index(rte_lcore_id())); fflush(stdout); }
/***************************************************************************** * trace_set_core_level() ****************************************************************************/ void trace_set_core_level(const trace_comp_t *tc, int lcore, const char *lvl) { trace_buffer_t *tb; trace_level_t tlevel; if (!tc) return; tb = trace_get_pointer(tc->tc_comp_id, rte_lcore_index(lcore)); if (!tb) return; if (!strncmp(lvl, "CRIT", strlen("CRIT") + 1)) { tlevel = TRACE_LVL_CRIT; } else if (!strncmp(lvl, "ERR", strlen("ERR") + 1)) { tlevel = TRACE_LVL_ERROR; } else if (!strncmp(lvl, "INFO", strlen("INFO") + 1)) { tlevel = TRACE_LVL_INFO; } else if (!strncmp(lvl, "LOG", strlen("LOG") + 1)) { tlevel = TRACE_LVL_LOG; } else if (!strncmp(lvl, "DEBUG", strlen("DEBUG") + 1)) { tlevel = TRACE_LVL_DEBUG; } else { assert(0); return; } TRACE_BUF_SET_LEVEL(tb, tlevel); }
/*****************************************************************************
 * trace_handle_xchg_ptr()
 *      Message handler: hand the current trace buffer contents back to the
 *      requester and swap in a fresh backing buffer. Returns -EINVAL for a
 *      malformed message, -ENOENT for an unknown buffer id, 0 otherwise
 *      (including the tracing-disabled no-op case).
 ****************************************************************************/
static int trace_handle_xchg_ptr(uint16_t msgid, uint16_t lcore, void *msg)
{
    trace_xchg_ptr_msg_t *xchg_msg;
    trace_buffer_t *tb;

    /* Reject anything that isn't a well-formed MSG_TRACE_XCHG_PTR. */
    if (MSG_INVALID(msgid, msg, MSG_TRACE_XCHG_PTR))
        return -EINVAL;

    xchg_msg = (trace_xchg_ptr_msg_t *)msg;

    tb = trace_get_pointer(xchg_msg->txpm_trace_buf_id,
                           rte_lcore_index(lcore));
    if (unlikely(!tb))
        return -ENOENT;

    /* Tracing disabled on this buffer: nothing to exchange. */
    if (!tb->tb_enabled)
        return 0;

    /* Publish the current read window back through the message. */
    *xchg_msg->txpm_start_pos = tb->tb_start;
    *xchg_msg->txpm_end_pos = tb->tb_end;

    /*
     * If the buffer is empty (start == end and tb_filled unset — tb_filled
     * presumably tracks wrap-around, TODO confirm) there is no first entry,
     * so report id 0; otherwise peek the entry header at tb_start for the
     * id of the oldest entry.
     */
    if (!tb->tb_filled && tb->tb_start == tb->tb_end)
        *xchg_msg->txpm_start_id = 0;
    else
        trace_entry_hdr_peek_buf(tb->tb_buf, tb->tb_size, tb->tb_start,
                                 xchg_msg->txpm_start_id, NULL, NULL);

    /* Swap in the new backing buffer; the old one is returned via
     * txpm_oldbuf. Must happen AFTER the reads above.
     */
    trace_xchg_ptr(tb, xchg_msg->txpm_newbuf, xchg_msg->txpm_oldbuf);
    *xchg_msg->txpm_bufsize = tb->tb_size;

    /* Reset the (now fresh) buffer's state. */
    tb->tb_filled = false;
    tb->tb_start = tb->tb_end = 0;

    return 0;
}
/***************************************************************************** * trace_handle_disable() ****************************************************************************/ static int trace_handle_disable(uint16_t msgid, uint16_t lcore, void *msg) { trace_buffer_t *tb; trace_disable_msg_t *dis_msg; if (MSG_INVALID(msgid, msg, MSG_TRACE_DISABLE)) return -EINVAL; dis_msg = (trace_disable_msg_t *)msg; tb = trace_get_pointer(dis_msg->tdm_trace_buf_id, rte_lcore_index(lcore)); if (unlikely(!tb)) return -ENOENT; if (!tb->tb_enabled) return 0; trace_xchg_ptr(tb, NULL, dis_msg->tdm_oldbuf); /* No need to check that the state has been reset.. */ tb->tb_enabled = false; tb->tb_filled = false; tb->tb_start = tb->tb_end = 0; return 0; }
/***************************************************************************** * trace_handle_enable() ****************************************************************************/ static int trace_handle_enable(uint16_t msgid, uint16_t lcore, void *msg) { trace_buffer_t *tb; trace_enable_msg_t *ena_msg; if (MSG_INVALID(msgid, msg, MSG_TRACE_ENABLE)) return -EINVAL; ena_msg = (trace_enable_msg_t *)msg; tb = trace_get_pointer(ena_msg->tem_trace_buf_id, rte_lcore_index(lcore)); if (unlikely(!tb)) return -ENOENT; if (tb->tb_enabled) return 0; trace_xchg_ptr(tb, ena_msg->tem_newbuf, NULL); /* No need to check that the state has been reset.. */ tb->tb_enabled = true; tb->tb_filled = false; return 0; }
/*****************************************************************************
 * trace_get_core_enabled()
 *      Return whether tracing is enabled for component tc on the given
 *      lcore. Returns false when tc is NULL or its per-core buffer is
 *      missing.
 ****************************************************************************/
bool trace_get_core_enabled(const trace_comp_t *tc, int lcore)
{
    trace_buffer_t *tb;

    if (!tc)
        return false; /* was a bare 0; use bool literals for a bool return */

    tb = trace_get_pointer(tc->tc_comp_id, rte_lcore_index(lcore));
    if (!tb)
        return false;

    return tb->tb_enabled;
}
/*****************************************************************************
 * trace_get_core_bufsize()
 *      Return the trace buffer size for component tc on the given lcore,
 *      or 0 when tc is NULL or its per-core buffer is missing.
 ****************************************************************************/
uint32_t trace_get_core_bufsize(const trace_comp_t *tc, int lcore)
{
    trace_buffer_t *tb = NULL;

    if (tc)
        tb = trace_get_pointer(tc->tc_comp_id, rte_lcore_index(lcore));

    return tb ? tb->tb_size : 0;
}
/*****************************************************************************
 * trace_get_core_level()
 *      Return the human-readable trace level name for component tc on the
 *      given lcore, or "Unknown" when tc is NULL or its per-core buffer is
 *      missing.
 ****************************************************************************/
const char *trace_get_core_level(const trace_comp_t *tc, int lcore)
{
    trace_buffer_t *tb = NULL;

    if (tc)
        tb = trace_get_pointer(tc->tc_comp_id, rte_lcore_index(lcore));

    if (!tb)
        return "Unknown";

    return trace_level_str(tb->tb_lvl);
}
/*****************************************************************************
 * start_cores()
 *      Launch the per-core main loops on all slave lcores, then block until
 *      every packet-processing core has acknowledged its init message.
 ****************************************************************************/
static void start_cores(void)
{
    uint32_t core;

    /*
     * Fire up the packet processing cores
     */
    RTE_LCORE_FOREACH_SLAVE(core) {
        int index = rte_lcore_index(core);

        if (index == TPG_CORE_IDX_CLI) {
            /* The CLI core must never appear as a slave lcore. */
            assert(false);
        } else if (index == TPG_CORE_IDX_TEST_MGMT) {
            rte_eal_remote_launch(test_mgmt_loop, NULL, core);
        } else {
            /* Everything else is a packet-processing core. */
            assert(index >= TPG_NR_OF_NON_PACKET_PROCESSING_CORES);
            rte_eal_remote_launch(pkt_receive_loop, NULL, core);
        }
    }

    /*
     * Wait for packet cores to finish initialization.
     */
    RTE_LCORE_FOREACH_SLAVE(core) {
        msg_t msg;
        int   rc;

        if (!cfg_is_pkt_core(core))
            continue;

        msg_init(&msg, MSG_PKTLOOP_INIT_WAIT, core, 0);

        /* BLOCK waiting for msg to be processed */
        rc = msg_send(&msg, 0);
        if (rc)
            TPG_ERROR_ABORT("ERROR: Failed to send pktloop init wait msg: %s(%d)!\n",
                            rte_strerror(-rc), -rc);
    }
}
/*****************************************************************************
 * tlkp_udp_lcore_init()
 *      Allocate and initialize the per-lcore UDP connection hash table
 *      (TPG_HASH_BUCKET_SIZE buckets per ethernet device), placed on the
 *      lcore's NUMA socket. Aborts on allocation failure.
 ****************************************************************************/
void tlkp_udp_lcore_init(uint32_t lcore_id)
{
    /* Hoisted out of the loop condition: rte_eth_dev_count() is a function
     * call and the bucket count is loop-invariant.
     */
    unsigned int nr_buckets = rte_eth_dev_count() * TPG_HASH_BUCKET_SIZE;
    unsigned int i;

    RTE_PER_LCORE(tlkp_ucb_hash_table) =
        rte_zmalloc_socket("udp_hash_table",
                           nr_buckets * sizeof(tlkp_hash_bucket_t),
                           RTE_CACHE_LINE_SIZE,
                           rte_lcore_to_socket_id(lcore_id));
    if (RTE_PER_LCORE(tlkp_ucb_hash_table) == NULL) {
        TPG_ERROR_ABORT("[%d]: Failed to allocate per lcore udp htable!\n",
                        rte_lcore_index(lcore_id));
    }

    for (i = 0; i < nr_buckets; i++) {
        /*
         * Initialize all list headers.
         */
        LIST_INIT((&RTE_PER_LCORE(tlkp_ucb_hash_table)[i]));
    }
}
int32_t populateNodeInfo (void) { int32_t i = 0, socketId = -1, lcoreIndex = 0, enable = 0; uint8_t coreCount, portCount; struct rte_eth_dev_info devInfo; /* fetch total lcore count under DPDK */ coreCount = rte_lcore_count(); for (i = 0; i < coreCount; i++) { socketId = rte_lcore_to_socket_id(i); lcoreIndex = rte_lcore_index(i); enable = rte_lcore_is_enabled(i); //printf ("\n Logical %d Physical %d Socket %d Enabled %d \n", // i, lcoreIndex, socketId, enable); if (likely(enable)) { /* classify the lcore info per NUMA node */ numaNodeInfo[socketId].lcoreAvail = numaNodeInfo[socketId].lcoreAvail | (1 << lcoreIndex); numaNodeInfo[socketId].lcoreTotal += 1; } else { rte_panic("ERROR: Lcore %d Socket %d not enabled\n", lcoreIndex, socketId); exit(EXIT_FAILURE); } } /* Create mempool per numa node based on interface available */ portCount = rte_eth_dev_count(); for (i =0; i < portCount; i++) { rte_eth_dev_info_get(i, &devInfo); printf("\n Inteface %d", i); printf("\n - driver: %s", devInfo.driver_name); printf("\n - if_index: %d", devInfo.if_index); if (devInfo.pci_dev) { printf("\n - PCI INFO "); printf("\n -- ADDR - domain:bus:devid:function %x:%x:%x:%x", devInfo.pci_dev->addr.domain, devInfo.pci_dev->addr.bus, devInfo.pci_dev->addr.devid, devInfo.pci_dev->addr.function); printf("\n == PCI ID - vendor:device:sub-vendor:sub-device %x:%x:%x:%x", devInfo.pci_dev->id.vendor_id, devInfo.pci_dev->id.device_id, devInfo.pci_dev->id.subsystem_vendor_id, devInfo.pci_dev->id.subsystem_device_id); printf("\n -- numa node: %d", devInfo.pci_dev->numa_node); } socketId = (devInfo.pci_dev->numa_node == -1)?0:devInfo.pci_dev->numa_node; numaNodeInfo[socketId].intfAvail = numaNodeInfo[socketId].intfAvail | (1 << i); numaNodeInfo[socketId].intfTotal += 1; } /* allocate mempool for numa which has NIC interfaces */ for (i = 0; i < MAX_NUMANODE; i++) { if (likely(numaNodeInfo[i].intfAvail)) { /* ToDo: per interface */ uint8_t portIndex = 0; char mempoolName[25]; /* create mempool for TX */ 
sprintf(mempoolName, "mbuf_pool-%d-%d-tx", i, portIndex); numaNodeInfo[i].tx[portIndex] = rte_mempool_create( mempoolName, NB_MBUF, MBUF_SIZE, 64, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, i,/*SOCKET_ID_ANY*/ 0/*MEMPOOL_F_SP_PUT*/); if (unlikely(numaNodeInfo[i].tx[portIndex] == NULL)) { rte_panic("\n ERROR: failed to get mem-pool for tx on node %d intf %d\n", i, portIndex); exit(EXIT_FAILURE); } /* create mempool for RX */ sprintf(mempoolName, "mbuf_pool-%d-%d-rx", i, portIndex); numaNodeInfo[i].rx[portIndex] = rte_mempool_create( mempoolName, NB_MBUF, MBUF_SIZE, 64, sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, i,/*SOCKET_ID_ANY*/ 0/*MEMPOOL_F_SP_PUT*/); if (unlikely(numaNodeInfo[i].rx[portIndex] == NULL)) { rte_panic("\n ERROR: failed to get mem-pool for rx on node %d intf %d\n", i, portIndex); exit(EXIT_FAILURE); } } } return 0; }