int scheduler_suite_init(void) { odp_cpumask_t mask; odp_shm_t shm; odp_pool_t pool; test_globals_t *globals; thread_args_t *args; odp_pool_param_t params; odp_pool_param_init(&params); params.buf.size = BUF_SIZE; params.buf.align = 0; params.buf.num = MSG_POOL_SIZE; params.type = ODP_POOL_BUFFER; pool = odp_pool_create(MSG_POOL_NAME, &params); if (pool == ODP_POOL_INVALID) { printf("Pool creation failed (msg).\n"); return -1; } shm = odp_shm_reserve(GLOBALS_SHM_NAME, sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0); globals = odp_shm_addr(shm); if (!globals) { printf("Shared memory reserve failed (globals).\n"); return -1; } memset(globals, 0, sizeof(test_globals_t)); globals->num_workers = odp_cpumask_default_worker(&mask, 0); if (globals->num_workers > MAX_WORKERS) globals->num_workers = MAX_WORKERS; shm = odp_shm_reserve(SHM_THR_ARGS_NAME, sizeof(thread_args_t), ODP_CACHE_LINE_SIZE, 0); args = odp_shm_addr(shm); if (!args) { printf("Shared memory reserve failed (args).\n"); return -1; } memset(args, 0, sizeof(thread_args_t)); /* Barrier to sync test case execution */ odp_barrier_init(&globals->barrier, globals->num_workers); odp_ticketlock_init(&globals->lock); odp_spinlock_init(&globals->atomic_lock); if (create_queues() != 0) return -1; return 0; }
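The suite init above has no teardown counterpart in this excerpt. The sketch below is a hypothetical scheduler_suite_term() that simply unwinds the init: it assumes a destroy_queues() counterpart to create_queues() and reuses the MSG_POOL_NAME, SHM_THR_ARGS_NAME and GLOBALS_SHM_NAME identifiers from above.

/* Hypothetical teardown mirroring scheduler_suite_init(); destroy_queues()
 * is an assumed counterpart to create_queues(). */
int scheduler_suite_term(void)
{
	if (destroy_queues() != 0) {
		printf("Queue destroy failed.\n");
		return -1;
	}

	if (odp_pool_destroy(odp_pool_lookup(MSG_POOL_NAME)) != 0) {
		printf("Pool destroy failed (msg).\n");
		return -1;
	}

	if (odp_shm_free(odp_shm_lookup(SHM_THR_ARGS_NAME)) != 0 ||
	    odp_shm_free(odp_shm_lookup(GLOBALS_SHM_NAME)) != 0) {
		printf("Shared memory free failed.\n");
		return -1;
	}

	return 0;
}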
int main(int argc, char **argv) { int ret; odp_shm_t shm; int max_thrs; if (odp_init_global(NULL, NULL) != 0) LOG_ABORT("Failed global init.\n"); if (odp_init_local(ODP_THREAD_CONTROL) != 0) LOG_ABORT("Failed local init.\n"); shm = odp_shm_reserve("test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0); gbl_args = odp_shm_addr(shm); if (gbl_args == NULL) LOG_ABORT("Shared memory reserve failed.\n"); memset(gbl_args, 0, sizeof(test_globals_t)); max_thrs = odp_thread_count_max(); gbl_args->rx_stats_size = max_thrs * sizeof(pkt_rx_stats_t); gbl_args->tx_stats_size = max_thrs * sizeof(pkt_tx_stats_t); shm = odp_shm_reserve("test_globals.rx_stats", gbl_args->rx_stats_size, ODP_CACHE_LINE_SIZE, 0); gbl_args->rx_stats = odp_shm_addr(shm); if (gbl_args->rx_stats == NULL) LOG_ABORT("Shared memory reserve failed.\n"); memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size); shm = odp_shm_reserve("test_globals.tx_stats", gbl_args->tx_stats_size, ODP_CACHE_LINE_SIZE, 0); gbl_args->tx_stats = odp_shm_addr(shm); if (gbl_args->tx_stats == NULL) LOG_ABORT("Shared memory reserve failed.\n"); memset(gbl_args->tx_stats, 0, gbl_args->tx_stats_size); parse_args(argc, argv, &gbl_args->args); ret = test_init(); if (ret == 0) { ret = run_test(); test_term(); } return ret; }
int main(int argc, char **argv) { int ret; odp_shm_t shm; if (odp_init_global(NULL, NULL) != 0) LOG_ABORT("Failed global init.\n"); if (odp_init_local() != 0) LOG_ABORT("Failed local init.\n"); shm = odp_shm_reserve("test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0); gbl_args = odp_shm_addr(shm); if (gbl_args == NULL) LOG_ABORT("Shared memory reserve failed.\n"); memset(gbl_args, 0, sizeof(test_globals_t)); parse_args(argc, argv, &gbl_args->args); ret = test_init(); if (ret == 0) { ret = run_test(); test_term(); } return ret; }
int odp_pktio_init_global(void) { pktio_entry_t *pktio_entry; int id, i; int dev_num = sizeof(pktio_devs)/sizeof(pktio_devs[0]); pktio_tbl = odp_shm_reserve("odp_pktio_entries", sizeof(pktio_table_t), sizeof(pktio_entry_t)); if (pktio_tbl == NULL) return -1; memset(pktio_tbl, 0, sizeof(pktio_table_t)); for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) { pktio_entry = get_entry(id); odp_spinlock_init(&pktio_entry->s.lock); } /* Close all used RX channels */ for (i = 0; i < dev_num; i++) ti_em_osal_cppi_rx_channel_close(Cppi_CpDma_PASS_CPDMA, pktio_devs[i].rx_channel); return 0; }
int odp_queue_init_global(void) { uint32_t i; odp_shm_t shm; ODP_DBG("Queue init ... "); shm = odp_shm_reserve("odp_queues", sizeof(queue_table_t), sizeof(queue_entry_t), 0); queue_tbl = odp_shm_addr(shm); if (queue_tbl == NULL) return -1; memset(queue_tbl, 0, sizeof(queue_table_t)); for (i = 0; i < ODP_CONFIG_QUEUES; i++) { /* init locks */ queue_entry_t *queue = get_qentry(i); LOCK_INIT(queue); queue->s.handle = queue_from_id(i); } ODP_DBG("done\n"); ODP_DBG("Queue init global\n"); ODP_DBG(" struct queue_entry_s size %zu\n", sizeof(struct queue_entry_s)); ODP_DBG(" queue_entry_t size %zu\n", sizeof(queue_entry_t)); ODP_DBG("\n"); __k1_wmb(); return 0; }
odph_table_t odph_linear_table_create(const char *name, uint32_t capacity, uint32_t ODP_IGNORED, uint32_t value_size) { int idx; uint32_t node_num; odp_shm_t shmem; odph_linear_table_imp *tbl; if (strlen(name) >= ODPH_TABLE_NAME_LEN || capacity < 1 || capacity >= 0x1000 || value_size == 0) { printf("invalid create parameters (name, capacity or value_size)\n"); return NULL; } /* check for a name conflict in shm */ tbl = (odph_linear_table_imp *)odp_shm_addr(odp_shm_lookup(name)); if (tbl != NULL) { ODPH_DBG("name already exists\n"); return NULL; } /* alloc memory from shm */ shmem = odp_shm_reserve(name, capacity << 20, 64, ODP_SHM_SW_ONLY); if (shmem == ODP_SHM_INVALID) { ODPH_DBG("shm reserve failed\n"); return NULL; } tbl = (odph_linear_table_imp *)odp_shm_addr(shmem); /* clean this block of memory */ memset(tbl, 0, capacity << 20); tbl->init_cap = capacity << 20; strncpy(tbl->name, name, ODPH_TABLE_NAME_LEN - 1); /* for a linear table the key is just the index, so there are no conflicts; * we only need to record the value content, * with an rwlock at the head of every node */ tbl->value_size = value_size + sizeof(odp_rwlock_t); node_num = tbl->init_cap / tbl->value_size; tbl->node_sum = node_num; tbl->value_array = (void *)((char *)tbl + sizeof(odph_linear_table_imp)); /* initialize rwlocks */ for (idx = 0; idx < tbl->node_sum; idx++) { odp_rwlock_t *lock = (odp_rwlock_t *)((char *)tbl->value_array + idx * tbl->value_size); odp_rwlock_init(lock); } tbl->magicword = ODPH_LINEAR_TABLE_MAGIC_WORD; return (odph_table_t)(tbl); }
int odp_pktio_init_global(void) { char name[ODP_QUEUE_NAME_LEN]; pktio_entry_t *pktio_entry; queue_entry_t *queue_entry; odp_queue_t qid; int id; odp_shm_t shm; int pktio_if; shm = odp_shm_reserve("odp_pktio_entries", sizeof(pktio_table_t), sizeof(pktio_entry_t), 0); pktio_tbl = odp_shm_addr(shm); if (pktio_tbl == NULL) return -1; memset(pktio_tbl, 0, sizeof(pktio_table_t)); odp_spinlock_init(&pktio_tbl->lock); for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) { pktio_entry = &pktio_tbl->entries[id - 1]; odp_spinlock_init(&pktio_entry->s.lock); odp_spinlock_init(&pktio_entry->s.cls.lock); odp_spinlock_init(&pktio_entry->s.cls.l2_cos_table.lock); odp_spinlock_init(&pktio_entry->s.cls.l3_cos_table.lock); pktio_entry_ptr[id - 1] = pktio_entry; /* Create a default output queue for each pktio resource */ snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id); name[ODP_QUEUE_NAME_LEN - 1] = '\0'; qid = odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL); if (qid == ODP_QUEUE_INVALID) return -1; pktio_entry->s.outq_default = qid; queue_entry = queue_to_qentry(qid); queue_entry->s.pktout = _odp_cast_scalar(odp_pktio_t, id); } for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) if (pktio_if_ops[pktio_if]->init) if (pktio_if_ops[pktio_if]->init()) ODP_ERR("failed to initialize pktio type %d\n", pktio_if); return 0; }
int odp_pool_init_global(void) { uint32_t i; odp_shm_t shm; shm = odp_shm_reserve(SHM_DEFAULT_NAME, sizeof(pool_table_t), sizeof(pool_entry_t), 0); pool_tbl = odp_shm_addr(shm); if (pool_tbl == NULL) return -1; memset(pool_tbl, 0, sizeof(pool_table_t)); for (i = 0; i < ODP_CONFIG_POOLS; i++) { /* init locks */ pool_entry_t *pool = &pool_tbl->pool[i]; POOL_LOCK_INIT(&pool->s.lock); POOL_LOCK_INIT(&pool->s.buf_lock); POOL_LOCK_INIT(&pool->s.blk_lock); pool->s.pool_hdl = pool_index_to_handle(i); pool->s.pool_id = i; pool_entry_ptr[i] = pool; odp_atomic_init_u32(&pool->s.bufcount, 0); odp_atomic_init_u32(&pool->s.blkcount, 0); /* Initialize pool statistics counters */ odp_atomic_init_u64(&pool->s.poolstats.bufallocs, 0); odp_atomic_init_u64(&pool->s.poolstats.buffrees, 0); odp_atomic_init_u64(&pool->s.poolstats.blkallocs, 0); odp_atomic_init_u64(&pool->s.poolstats.blkfrees, 0); odp_atomic_init_u64(&pool->s.poolstats.bufempty, 0); odp_atomic_init_u64(&pool->s.poolstats.blkempty, 0); odp_atomic_init_u64(&pool->s.poolstats.high_wm_count, 0); odp_atomic_init_u64(&pool->s.poolstats.low_wm_count, 0); } ODP_DBG("\nPool init global\n"); ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s)); ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t)); ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t)); ODP_DBG("\n"); return 0; }
void init_stream_db(void) { odp_shm_t shm; shm = odp_shm_reserve("stream_db", sizeof(stream_db_t), ODP_CACHE_LINE_SIZE, 0); stream_db = odp_shm_addr(shm); if (stream_db == NULL) { EXAMPLE_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(stream_db, 0, sizeof(*stream_db)); }
void init_ipsec_cache(void) { odp_shm_t shm; shm = odp_shm_reserve("shm_ipsec_cache", sizeof(ipsec_cache_t), ODP_CACHE_LINE_SIZE, 0); ipsec_cache = odp_shm_addr(shm); if (ipsec_cache == NULL) { EXAMPLE_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(ipsec_cache, 0, sizeof(*ipsec_cache)); }
void *ofp_shared_memory_alloc(const char *name, uint64_t size) { odp_shm_t shm_h; void *shm; shm_h = odp_shm_reserve(name, size, ODP_CACHE_LINE_SIZE, 0); if (shm_h == ODP_SHM_INVALID) return NULL; shm = odp_shm_addr(shm_h); if (shm == NULL) { odp_shm_free(shm_h); return NULL; } return shm; }
static void *allocate_shared_memory(const char *name, uint64_t size) { odp_shm_t shm_h; void *shm; shm_h = odp_shm_reserve(name, size, ODP_CACHE_LINE_SIZE, OFP_SHM_SINGLE_VA); if (shm_h == ODP_SHM_INVALID) return NULL; shm = odp_shm_addr(shm_h); if (shm == NULL) { odp_shm_free(shm_h); return NULL; } return shm; }
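Both wrappers above return a raw pointer and drop the odp_shm_t handle, so cleanup has to go back through the block name. The usage sketch below is illustrative only: foo_state_t, foo_init() and foo_term() are hypothetical names, and the free path relies on odp_shm_lookup() to recover the handle.

/* Hedged usage sketch; foo_state_t and "foo_state" are illustrative names. */
typedef struct {
	uint64_t pkts_seen;
} foo_state_t;

static foo_state_t *foo_state;

static int foo_init(void)
{
	foo_state = ofp_shared_memory_alloc("foo_state", sizeof(*foo_state));
	if (foo_state == NULL)
		return -1;

	memset(foo_state, 0, sizeof(*foo_state));
	return 0;
}

static void foo_term(void)
{
	/* The wrapper does not keep the handle, so look it up by name. */
	odp_shm_free(odp_shm_lookup("foo_state"));
	foo_state = NULL;
}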
int odp_thread_init_global(void) { odp_shm_t shm; shm = odp_shm_reserve("odp_thread_globals", sizeof(thread_globals_t), ODP_CACHE_LINE_SIZE, 0); thread_globals = odp_shm_addr(shm); if (thread_globals == NULL) return -1; memset(thread_globals, 0, sizeof(thread_globals_t)); odp_spinlock_init(&thread_globals->lock); odp_thrmask_zero(&thread_globals->all); odp_thrmask_zero(&thread_globals->worker); odp_thrmask_zero(&thread_globals->control); return 0; }
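No cleanup path for the thread table appears in this excerpt; a minimal hypothetical counterpart, assuming odp_thrmask_count() reflects threads that are still registered, could look like the following.

/* Hypothetical counterpart to odp_thread_init_global(). */
int odp_thread_term_global(void)
{
	int n = odp_thrmask_count(&thread_globals->all);

	if (n)
		ODP_DBG("%i threads have not called odp_term_local()\n", n);

	return odp_shm_free(odp_shm_lookup("odp_thread_globals"));
}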
int odp_pktio_init_global(void) { char name[ODP_QUEUE_NAME_LEN]; pktio_entry_t *pktio_entry; queue_entry_t *queue_entry; odp_queue_t qid; int id; pktio_tbl = odp_shm_reserve("odp_pktio_entries", sizeof(pktio_table_t), sizeof(pktio_entry_t)); if (pktio_tbl == NULL) return -1; memset(pktio_tbl, 0, sizeof(pktio_table_t)); for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) { pktio_entry = get_entry(id); odp_spinlock_init(&pktio_entry->s.lock); /* Create a default output queue for each pktio resource */ snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id); name[ODP_QUEUE_NAME_LEN-1] = '\0'; qid = odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL); if (qid == ODP_QUEUE_INVALID) return -1; pktio_entry->s.outq_default = qid; queue_entry = queue_to_qentry(qid); queue_entry->s.pktout = id; } return 0; }
int odp_schedule_init_global(void) { odp_shm_t shm; odp_pool_t pool; int i, j; odp_pool_param_t params; shm = odp_shm_reserve("odp_scheduler", sizeof(sched_t), ODP_CACHE_LINE_SIZE, 0); sched = odp_shm_addr(shm); if (sched == NULL) { ODP_ERR("Schedule init: Shm reserve failed.\n"); return -1; } memset(sched, 0, sizeof(sched_t)); params.buf.size = sizeof(sched_cmd_t); params.buf.align = 0; params.buf.num = NUM_SCHED_CMD; params.type = ODP_POOL_BUFFER; pool = odp_pool_create("odp_sched_pool", &params); if (pool == ODP_POOL_INVALID) { ODP_ERR("Schedule init: Pool create failed.\n"); return -1; } sched->pool = pool; sched->shm = shm; odp_spinlock_init(&sched->mask_lock); for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) { odp_queue_t queue; char name[] = "odp_priXX_YY"; name[7] = '0' + i / 10; name[8] = '0' + i - 10*(i / 10); for (j = 0; j < QUEUES_PER_PRIO; j++) { name[10] = '0' + j / 10; name[11] = '0' + j - 10*(j / 10); queue = odp_queue_create(name, ODP_QUEUE_TYPE_POLL, NULL); if (queue == ODP_QUEUE_INVALID) { ODP_ERR("Sched init: Queue create failed.\n"); return -1; } sched->pri_queue[i][j] = queue; sched->pri_mask[i] = 0; } } return 0; }
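A matching odp_schedule_term_global() is not shown here; the sketch below is a minimal, assumed unwind of the init above, destroying the per-priority queues, the command pool and the shared memory block recorded in sched->shm.

/* Hypothetical teardown mirroring odp_schedule_init_global(); assumes no
 * queues are still being scheduled when it runs. */
int odp_schedule_term_global(void)
{
	int i, j;
	int ret = 0;

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++)
		for (j = 0; j < QUEUES_PER_PRIO; j++)
			if (odp_queue_destroy(sched->pri_queue[i][j]))
				ret = -1;

	if (odp_pool_destroy(sched->pool))
		ret = -1;

	if (odp_shm_free(sched->shm))
		ret = -1;

	return ret;
}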
odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params) { odp_pool_t pool_hdl = ODP_POOL_INVALID; pool_entry_t *pool; uint32_t i, headroom = 0, tailroom = 0; odp_shm_t shm; if (params == NULL) return ODP_POOL_INVALID; /* Default size and align for timeouts */ if (params->type == ODP_POOL_TIMEOUT) { params->buf.size = 0; /* tmo.__res1 */ params->buf.align = 0; /* tmo.__res2 */ } /* Default initialization parameters */ uint32_t p_udata_size = 0; uint32_t udata_stride = 0; /* Restriction for v1.0: All non-packet buffers are unsegmented */ int unseg = 1; /* Restriction for v1.0: No zeroization support */ const int zeroized = 0; uint32_t blk_size, buf_stride, buf_num, seg_len = 0; uint32_t buf_align = params->type == ODP_POOL_BUFFER ? params->buf.align : 0; /* Validate requested buffer alignment */ if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX || buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align)) return ODP_POOL_INVALID; /* Set correct alignment based on input request */ if (buf_align == 0) buf_align = ODP_CACHE_LINE_SIZE; else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN) buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN; /* Calculate space needed for buffer blocks and metadata */ switch (params->type) { case ODP_POOL_BUFFER: buf_num = params->buf.num; blk_size = params->buf.size; /* Optimize small raw buffers */ if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0) blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align); buf_stride = sizeof(odp_buffer_hdr_stride); break; case ODP_POOL_PACKET: unseg = 0; /* Packets are always segmented */ headroom = ODP_CONFIG_PACKET_HEADROOM; tailroom = ODP_CONFIG_PACKET_TAILROOM; buf_num = params->pkt.num + 1; /* one more for pkt_ctx */ seg_len = params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MIN ? ODP_CONFIG_PACKET_SEG_LEN_MIN : (params->pkt.seg_len <= ODP_CONFIG_PACKET_SEG_LEN_MAX ? params->pkt.seg_len : ODP_CONFIG_PACKET_SEG_LEN_MAX); seg_len = ODP_ALIGN_ROUNDUP( headroom + seg_len + tailroom, ODP_CONFIG_BUFFER_ALIGN_MIN); blk_size = params->pkt.len <= seg_len ? seg_len : ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len); /* Reject create if pkt.len needs too many segments */ if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) return ODP_POOL_INVALID; p_udata_size = params->pkt.uarea_size; udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size, sizeof(uint64_t)); buf_stride = sizeof(odp_packet_hdr_stride); break; case ODP_POOL_TIMEOUT: blk_size = 0; buf_num = params->tmo.num; buf_stride = sizeof(odp_timeout_hdr_stride); break; default: return ODP_POOL_INVALID; } /* Validate requested number of buffers against addressable limits */ if (buf_num > (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) return ODP_POOL_INVALID; /* Find an unused buffer pool slot and initialize it as requested */ for (i = 0; i < ODP_CONFIG_POOLS; i++) { pool = get_pool_entry(i); POOL_LOCK(&pool->s.lock); if (pool->s.pool_shm != ODP_SHM_INVALID) { POOL_UNLOCK(&pool->s.lock); continue; } /* found free pool */ size_t block_size, pad_size, mdata_size, udata_size; pool->s.flags.all = 0; if (name == NULL) { pool->s.name[0] = 0; } else { strncpy(pool->s.name, name, ODP_POOL_NAME_LEN - 1); pool->s.name[ODP_POOL_NAME_LEN - 1] = 0; pool->s.flags.has_name = 1; } pool->s.params = *params; pool->s.buf_align = buf_align; /* Optimize for short buffers: Data stored in buffer hdr */ if (blk_size <= ODP_MAX_INLINE_BUF) { block_size = 0; pool->s.buf_align = blk_size == 0 ? 
0 : sizeof(void *); } else { /* extra bytes per block for storing the hdr address */ block_size = buf_num * (blk_size + ODP_HDR_BACK_PTR_SIZE); pool->s.buf_align = buf_align; } pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size; mdata_size = buf_num * buf_stride; udata_size = buf_num * udata_stride; pool->s.buf_num = buf_num; pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size + pad_size + mdata_size + udata_size); shm = odp_shm_reserve(pool->s.name, pool->s.pool_size, ODP_PAGE_SIZE, ODP_SHM_MONOPOLIZE_CNTNUS_PHY); if (shm == ODP_SHM_INVALID) { POOL_UNLOCK(&pool->s.lock); return ODP_POOL_INVALID; } pool->s.pool_base_addr = odp_shm_addr(shm); pool->s.pool_shm = shm; /* Now safe to unlock since pool entry has been allocated */ POOL_UNLOCK(&pool->s.lock); pool->s.flags.unsegmented = unseg; pool->s.flags.zeroized = zeroized; pool->s.seg_size = unseg ? blk_size : seg_len; pool->s.blk_size = blk_size; uint8_t *block_base_addr = pool->s.pool_base_addr; uint8_t *mdata_base_addr = block_base_addr + block_size + pad_size; uint8_t *udata_base_addr = mdata_base_addr + mdata_size; uint64_t pool_base_phy = odp_v2p(pool->s.pool_base_addr); pool->s.v_p_offset = (uint64_t)pool->s.pool_base_addr - pool_base_phy; /* Pool mdata addr is used for indexing buffer metadata */ pool->s.pool_mdata_addr = mdata_base_addr; pool->s.udata_size = p_udata_size; pool->s.buf_stride = buf_stride; pool->s.buf_freelist = NULL; pool->s.blk_freelist = NULL; /* Initialization will increment these to their target vals */ odp_atomic_store_u32(&pool->s.bufcount, 0); odp_atomic_store_u32(&pool->s.blkcount, 0); uint8_t *buf = udata_base_addr - buf_stride; uint8_t *udat = udata_stride == 0 ? NULL : udata_base_addr + udata_size - udata_stride; /* Init buffer common header and add to pool buffer freelist */ do { odp_buffer_hdr_t *tmp = (odp_buffer_hdr_t *)(void *)buf; /* Initialize buffer metadata */ tmp->allocator = ODP_FREEBUF; tmp->flags.all = 0; tmp->flags.zeroized = zeroized; tmp->size = 0; odp_atomic_init_u32(&tmp->ref_count, 0); tmp->type = params->type; tmp->event_type = params->type; tmp->pool_hdl = pool->s.pool_hdl; tmp->uarea_addr = (void *)udat; tmp->uarea_size = p_udata_size; tmp->segcount = 0; tmp->segsize = pool->s.seg_size; tmp->handle.handle = odp_buffer_encode_handle(tmp); /* Set 1st seg addr for zero-len buffers */ tmp->addr[0] = NULL; /* Special case for short buffer data */ if (blk_size <= ODP_MAX_INLINE_BUF) { tmp->flags.hdrdata = 1; if (blk_size > 0) { tmp->segcount = 1; tmp->addr[0] = &tmp->addr[1]; tmp->size = blk_size; } } /* Push buffer onto pool's freelist */ ret_buf(&pool->s, tmp); buf -= buf_stride; udat -= udata_stride; } while (buf >= mdata_base_addr); /* Make sure blocks are divided into sizes aligned to 8 bytes, * as odp_packet_seg_t refers to address and segment count. * pool->s.seg_size is aligned to 8 bytes here before use. */ pool->s.seg_size = ODP_ALIGN_ROUNDUP(pool->s.seg_size, sizeof(uint64_t)); /* Form block freelist for pool */ uint8_t *blk = block_base_addr + block_size - pool->s.seg_size - ODP_HDR_BACK_PTR_SIZE; if (blk_size > ODP_MAX_INLINE_BUF) do { ret_blk(&pool->s, blk + ODP_HDR_BACK_PTR_SIZE); blk -= (pool->s.seg_size + ODP_HDR_BACK_PTR_SIZE); } while (blk >= block_base_addr); /* For packet pools, a pre-initialized packet hdr template is cached * in the pool entry. 
*/ if (params->type == ODP_POOL_PACKET) { odp_buffer_hdr_t *bh = get_buf(&pool->s); uint8_t *pkt_ctx = ((uint8_t *)bh + ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr)); memset(pkt_ctx, 0, sizeof(odp_packet_hdr_t) - ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr)); ((odp_packet_hdr_t *)bh)->l3_offset = ODP_PACKET_OFFSET_INVALID; ((odp_packet_hdr_t *)bh)->l4_offset = ODP_PACKET_OFFSET_INVALID; ((odp_packet_hdr_t *)bh)->payload_offset = ODP_PACKET_OFFSET_INVALID; ((odp_packet_hdr_t *)bh)->headroom = headroom; pool->s.cache_pkt_hdr = pkt_ctx; pool->s.buf_num -= 1; } /* Every pool records a max_size so that alloc only needs to * compare the requested size against max_size. */ if (!unseg) pool->s.max_size = pool->s.seg_size * ODP_BUFFER_MAX_SEG - headroom - tailroom; else pool->s.max_size = pool->s.seg_size; /* Initialize pool statistics counters */ odp_atomic_store_u64(&pool->s.poolstats.bufallocs, 0); odp_atomic_store_u64(&pool->s.poolstats.buffrees, 0); odp_atomic_store_u64(&pool->s.poolstats.blkallocs, 0); odp_atomic_store_u64(&pool->s.poolstats.blkfrees, 0); odp_atomic_store_u64(&pool->s.poolstats.bufempty, 0); odp_atomic_store_u64(&pool->s.poolstats.blkempty, 0); odp_atomic_store_u64(&pool->s.poolstats.high_wm_count, 0); odp_atomic_store_u64(&pool->s.poolstats.low_wm_count, 0); /* Reset other pool globals to initial state */ pool->s.low_wm_assert = 0; pool->s.quiesced = 0; pool->s.headroom = headroom; pool->s.tailroom = tailroom; pool->s.room_size = headroom + tailroom; /* Watermarks are hard-coded for now to control caching */ pool->s.high_wm = pool->s.buf_num / 2; pool->s.low_wm = pool->s.buf_num / 4; pool_hdl = pool->s.pool_hdl; break; } return pool_hdl; }
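From the caller's side, this odp_pool_create() is driven entirely through odp_pool_param_t, as the examples later in this file show. The short sketch below exercises the buffer-pool path end to end; the pool name and sizes are illustrative, not taken from the implementation above.

/* Hedged usage sketch for the buffer-pool path; names and sizes are
 * illustrative. */
static int demo_buffer_pool(void)
{
	odp_pool_param_t params;
	odp_pool_t pool;
	odp_buffer_t buf;

	odp_pool_param_init(&params);
	params.type      = ODP_POOL_BUFFER;
	params.buf.size  = 256;   /* bytes per buffer */
	params.buf.align = 0;     /* default alignment */
	params.buf.num   = 1024;  /* buffers in the pool */

	pool = odp_pool_create("demo_buf_pool", &params);
	if (pool == ODP_POOL_INVALID)
		return -1;

	buf = odp_buffer_alloc(pool);
	if (buf == ODP_BUFFER_INVALID) {
		odp_pool_destroy(pool);
		return -1;
	}

	odp_buffer_free(buf);
	return odp_pool_destroy(pool);
}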
/** * ODP packet example main function */ int main(int argc, char *argv[]) { odp_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_buffer_pool_t pool; int thr_id; int num_workers; void *pool_base; int i; int first_core; int core_count; /* Init ODP before calling anything else */ if (odp_init_global()) { ODP_ERR("Error: ODP global init failed.\n"); exit(EXIT_FAILURE); } /* Reserve memory for args from shared mem */ args = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE); if (args == NULL) { ODP_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(args, 0, sizeof(*args)); /* Parse and store the application arguments */ parse_args(argc, argv, &args->appl); /* Print both system and application information */ print_info(NO_PATH(argv[0]), &args->appl); core_count = odp_sys_core_count(); num_workers = core_count; if (args->appl.core_count) num_workers = args->appl.core_count; if (num_workers > MAX_WORKERS) num_workers = MAX_WORKERS; printf("Num worker threads: %i\n", num_workers); /* * By default core #0 runs Linux kernel background tasks. * Start mapping thread from core #1 */ first_core = 1; if (core_count == 1) first_core = 0; printf("First core: %i\n\n", first_core); /* Init this thread */ thr_id = odp_thread_create(0); odp_init_local(thr_id); /* Create packet pool */ pool_base = odp_shm_reserve("shm_packet_pool", SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE); if (pool_base == NULL) { ODP_ERR("Error: packet pool mem alloc failed.\n"); exit(EXIT_FAILURE); } pool = odp_buffer_pool_create("packet_pool", pool_base, SHM_PKT_POOL_SIZE, SHM_PKT_POOL_BUF_SIZE, ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_PACKET); if (pool == ODP_BUFFER_POOL_INVALID) { ODP_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } odp_buffer_pool_print(pool); /* Create and init worker threads */ memset(thread_tbl, 0, sizeof(thread_tbl)); for (i = 0; i < num_workers; ++i) { void *(*thr_run_func) (void *); int core; int if_idx; core = (first_core + i) % core_count; if_idx = i % args->appl.if_count; args->thread[i].pktio_dev = args->appl.if_names[if_idx]; args->thread[i].pool = pool; args->thread[i].mode = args->appl.mode; args->thread[i].type = args->appl.type; args->thread[i].fanout = args->appl.fanout; if (args->appl.mode == APPL_MODE_PKT_BURST) thr_run_func = pktio_ifburst_thread; else /* APPL_MODE_PKT_QUEUE */ thr_run_func = pktio_queue_thread; /* * Create threads one-by-one instead of all-at-once, * because each thread might get different arguments. * Calls odp_thread_create(cpu) for each thread */ odp_linux_pthread_create(thread_tbl, 1, core, thr_run_func, &args->thread[i]); } /* Master thread waits for other threads to exit */ odp_linux_pthread_join(thread_tbl, num_workers); printf("Exit\n\n"); return 0; }
/** * ODP L2 forwarding main function */ int main(int argc, char *argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_pool_t pool; int i; int cpu; int num_workers; odp_shm_t shm; odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odp_pool_param_t params; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { LOG_ERR("Error: ODP global init failed.\n"); exit(EXIT_FAILURE); } /* Init this thread */ if (odp_init_local(ODP_THREAD_CONTROL)) { LOG_ERR("Error: ODP local init failed.\n"); exit(EXIT_FAILURE); } /* Reserve memory for args from shared mem */ shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0); gbl_args = odp_shm_addr(shm); if (gbl_args == NULL) { LOG_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(gbl_args, 0, sizeof(*gbl_args)); /* Parse and store the application arguments */ parse_args(argc, argv, &gbl_args->appl); /* Print both system and application information */ print_info(NO_PATH(argv[0]), &gbl_args->appl); /* Default to system CPU count unless user specified */ num_workers = MAX_WORKERS; if (gbl_args->appl.cpu_count) num_workers = gbl_args->appl.cpu_count; /* Get default worker cpumask */ num_workers = odp_cpumask_def_worker(&cpumask, num_workers); (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr)); printf("num worker threads: %i\n", num_workers); printf("first CPU: %i\n", odp_cpumask_first(&cpumask)); printf("cpu mask: %s\n", cpumaskstr); if (num_workers < gbl_args->appl.if_count) { LOG_ERR("Error: CPU count %d less than interface count\n", num_workers); exit(EXIT_FAILURE); } if (gbl_args->appl.if_count % 2 != 0) { LOG_ERR("Error: interface count %d is odd in fwd appl.\n", gbl_args->appl.if_count); exit(EXIT_FAILURE); } /* Create packet pool */ memset(&params, 0, sizeof(params)); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; params.type = ODP_POOL_PACKET; pool = odp_pool_create("packet pool", &params); if (pool == ODP_POOL_INVALID) { LOG_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } odp_pool_print(pool); for (i = 0; i < gbl_args->appl.if_count; ++i) { gbl_args->pktios[i] = create_pktio(gbl_args->appl.if_names[i], pool, gbl_args->appl.mode); if (gbl_args->pktios[i] == ODP_PKTIO_INVALID) exit(EXIT_FAILURE); } gbl_args->pktios[i] = ODP_PKTIO_INVALID; memset(thread_tbl, 0, sizeof(thread_tbl)); stats_t **stats = calloc(1, sizeof(stats_t) * num_workers); odp_barrier_init(&barrier, num_workers + 1); /* Create worker threads */ cpu = odp_cpumask_first(&cpumask); for (i = 0; i < num_workers; ++i) { odp_cpumask_t thd_mask; void *(*thr_run_func) (void *); if (gbl_args->appl.mode == APPL_MODE_PKT_BURST) thr_run_func = pktio_ifburst_thread; else /* APPL_MODE_PKT_QUEUE */ thr_run_func = pktio_queue_thread; gbl_args->thread[i].src_idx = i % gbl_args->appl.if_count; gbl_args->thread[i].stats = &stats[i]; odp_cpumask_zero(&thd_mask); odp_cpumask_set(&thd_mask, cpu); odph_linux_pthread_create(&thread_tbl[i], &thd_mask, thr_run_func, &gbl_args->thread[i]); cpu = odp_cpumask_next(&cpumask, cpu); } print_speed_stats(num_workers, stats, gbl_args->appl.time, gbl_args->appl.accuracy); free(stats); exit_threads = 1; /* Master thread waits for other threads to exit */ odph_linux_pthread_join(thread_tbl, num_workers); free(gbl_args->appl.if_names); free(gbl_args->appl.if_str); printf("Exit\n\n"); return 0; }
/** * Test main function */ int main(int argc, char *argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; int num_workers; odp_queue_t queue; uint64_t cycles, ns; odp_queue_param_t param; odp_pool_param_t params; odp_timer_pool_param_t tparams; odp_timer_pool_info_t tpinfo; odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odp_shm_t shm; test_globals_t *gbls; printf("\nODP timer example starts\n"); if (odp_init_global(NULL, NULL)) { printf("ODP global init failed.\n"); return -1; } /* Init this thread. */ if (odp_init_local()) { printf("ODP local init failed.\n"); return -1; } printf("\n"); printf("ODP system info\n"); printf("---------------\n"); printf("ODP API version: %s\n", odp_version_api_str()); printf("CPU model: %s\n", odp_sys_cpu_model_str()); printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz()); printf("Cache line size: %i\n", odp_sys_cache_line_size()); printf("Max CPU count: %i\n", odp_cpu_count()); printf("\n"); /* Reserve memory for test_globals_t from shared mem */ shm = odp_shm_reserve("shm_test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0); if (ODP_SHM_INVALID == shm) { EXAMPLE_ERR("Error: shared mem reserve failed.\n"); return -1; } gbls = odp_shm_addr(shm); if (NULL == gbls) { EXAMPLE_ERR("Error: shared mem alloc failed.\n"); return -1; } memset(gbls, 0, sizeof(test_globals_t)); parse_args(argc, argv, &gbls->args); memset(thread_tbl, 0, sizeof(thread_tbl)); /* Default to system CPU count unless user specified */ num_workers = MAX_WORKERS; if (gbls->args.cpu_count) num_workers = gbls->args.cpu_count; /* * By default CPU #0 runs Linux kernel background tasks. * Start mapping thread from CPU #1 */ num_workers = odph_linux_cpumask_default(&cpumask, num_workers); (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr)); printf("num worker threads: %i\n", num_workers); printf("first CPU: %i\n", odp_cpumask_first(&cpumask)); printf("cpu mask: %s\n", cpumaskstr); printf("resolution: %i usec\n", gbls->args.resolution_us); printf("min timeout: %i usec\n", gbls->args.min_us); printf("max timeout: %i usec\n", gbls->args.max_us); printf("period: %i usec\n", gbls->args.period_us); printf("timeouts: %i\n", gbls->args.tmo_count); /* * Create pool for timeouts */ params.tmo.num = NUM_TMOS; params.type = ODP_POOL_TIMEOUT; gbls->pool = odp_pool_create("msg_pool", ODP_SHM_NULL, &params); if (gbls->pool == ODP_POOL_INVALID) { EXAMPLE_ERR("Pool create failed.\n"); return -1; } tparams.res_ns = gbls->args.resolution_us*ODP_TIME_USEC; tparams.min_tmo = gbls->args.min_us*ODP_TIME_USEC; tparams.max_tmo = gbls->args.max_us*ODP_TIME_USEC; tparams.num_timers = num_workers; /* One timer per worker */ tparams.priv = 0; /* Shared */ tparams.clk_src = ODP_CLOCK_CPU; gbls->tp = odp_timer_pool_create("timer_pool", &tparams); if (gbls->tp == ODP_TIMER_POOL_INVALID) { EXAMPLE_ERR("Timer pool create failed.\n"); return -1; } odp_timer_pool_start(); odp_shm_print_all(); (void)odp_timer_pool_info(gbls->tp, &tpinfo); printf("Timer pool\n"); printf("----------\n"); printf(" name: %s\n", tpinfo.name); printf(" resolution: %"PRIu64" ns\n", tpinfo.param.res_ns); printf(" min tmo: %"PRIu64" ticks\n", tpinfo.param.min_tmo); printf(" max tmo: %"PRIu64" ticks\n", tpinfo.param.max_tmo); printf("\n"); /* * Create a queue for timer test */ memset(&param, 0, sizeof(param)); param.sched.prio = ODP_SCHED_PRIO_DEFAULT; param.sched.sync = ODP_SCHED_SYNC_NONE; param.sched.group = ODP_SCHED_GROUP_DEFAULT; queue = odp_queue_create("timer_queue", ODP_QUEUE_TYPE_SCHED, &param); if (queue == 
ODP_QUEUE_INVALID) { EXAMPLE_ERR("Timer queue create failed.\n"); return -1; } printf("CPU freq %"PRIu64" Hz\n", odp_sys_cpu_hz()); printf("Cycles vs nanoseconds:\n"); ns = 0; cycles = odp_time_ns_to_cycles(ns); printf(" %12"PRIu64" ns -> %12"PRIu64" cycles\n", ns, cycles); printf(" %12"PRIu64" cycles -> %12"PRIu64" ns\n", cycles, odp_time_cycles_to_ns(cycles)); for (ns = 1; ns <= 100*ODP_TIME_SEC; ns *= 10) { cycles = odp_time_ns_to_cycles(ns); printf(" %12"PRIu64" ns -> %12"PRIu64" cycles\n", ns, cycles); printf(" %12"PRIu64" cycles -> %12"PRIu64" ns\n", cycles, odp_time_cycles_to_ns(cycles)); } printf("\n"); /* Initialize number of timeouts to receive */ odp_atomic_init_u32(&gbls->remain, gbls->args.tmo_count * num_workers); /* Barrier to sync test case execution */ odp_barrier_init(&gbls->test_barrier, num_workers); /* Create and launch worker threads */ odph_linux_pthread_create(thread_tbl, &cpumask, run_thread, gbls); /* Wait for worker threads to exit */ odph_linux_pthread_join(thread_tbl, num_workers); printf("ODP timer test complete\n\n"); return 0; }
int main(int argc, char **argv) { odph_odpthread_t thread_tbl[MAX_WORKERS]; int i, j; int cpu; int num_workers; odp_shm_t shm; odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odp_pool_param_t params; int ret; stats_t (*stats)[MAX_PKTIOS]; int if_count; odp_instance_t instance; odph_odpthread_params_t thr_params; /* Init ODP before calling anything else */ if (odp_init_global(&instance, NULL, NULL)) { printf("Error: ODP global init failed.\n"); exit(EXIT_FAILURE); } /* Init this thread */ if (odp_init_local(instance, ODP_THREAD_CONTROL)) { printf("Error: ODP local init failed.\n"); exit(EXIT_FAILURE); } /* Reserve memory for args from shared mem */ shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0); gbl_args = odp_shm_addr(shm); if (gbl_args == NULL) { printf("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } gbl_args_init(gbl_args); for (i = 0; (unsigned)i < MAC_TBL_SIZE; i++) odp_atomic_init_u64(&gbl_args->mac_tbl[i], 0); /* Parse and store the application arguments */ parse_args(argc, argv, &gbl_args->appl); /* Print both system and application information */ print_info(NO_PATH(argv[0]), &gbl_args->appl); /* Default to system CPU count unless user specified */ num_workers = MAX_WORKERS; if (gbl_args->appl.cpu_count) num_workers = gbl_args->appl.cpu_count; /* Get default worker cpumask */ num_workers = odp_cpumask_default_worker(&cpumask, num_workers); (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr)); gbl_args->appl.num_workers = num_workers; if_count = gbl_args->appl.if_count; printf("num worker threads: %i\n", num_workers); printf("first CPU: %i\n", odp_cpumask_first(&cpumask)); printf("cpu mask: %s\n", cpumaskstr); /* Create packet pool */ odp_pool_param_init(&params); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE; params.type = ODP_POOL_PACKET; gbl_args->pool = odp_pool_create("packet pool", &params); if (gbl_args->pool == ODP_POOL_INVALID) { printf("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } odp_pool_print(gbl_args->pool); bind_workers(); for (i = 0; i < if_count; ++i) { const char *dev = gbl_args->appl.if_names[i]; int num_rx; /* An RX queue per assigned worker and a private TX queue for * each worker */ num_rx = gbl_args->pktios[i].num_rx_thr; if (create_pktio(dev, i, num_rx, num_workers, gbl_args->pool)) exit(EXIT_FAILURE); ret = odp_pktio_promisc_mode_set(gbl_args->pktios[i].pktio, 1); if (ret != 0) { printf("Error: failed to set port to promiscuous mode.\n"); exit(EXIT_FAILURE); } } gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID; bind_queues(); print_port_mapping(); memset(thread_tbl, 0, sizeof(thread_tbl)); odp_barrier_init(&barrier, num_workers + 1); stats = gbl_args->stats; memset(&thr_params, 0, sizeof(thr_params)); thr_params.thr_type = ODP_THREAD_WORKER; thr_params.instance = instance; thr_params.start = run_worker; /* Create worker threads */ cpu = odp_cpumask_first(&cpumask); for (i = 0; i < num_workers; ++i) { odp_cpumask_t thd_mask; for (j = 0; j < MAX_PKTIOS; j++) gbl_args->thread[i].stats[j] = &stats[i][j]; thr_params.arg = &gbl_args->thread[i]; odp_cpumask_zero(&thd_mask); odp_cpumask_set(&thd_mask, cpu); odph_odpthreads_create(&thread_tbl[i], &thd_mask, &thr_params); cpu = odp_cpumask_next(&cpumask, cpu); } /* Start packet receive and transmit */ for (i = 0; i < if_count; ++i) { odp_pktio_t pktio; pktio = gbl_args->pktios[i].pktio; ret = odp_pktio_start(pktio); if (ret) { printf("Error: unable to start %s\n", 
gbl_args->appl.if_names[i]); exit(EXIT_FAILURE); } } ret = print_speed_stats(num_workers, gbl_args->stats, gbl_args->appl.time, gbl_args->appl.accuracy); exit_threads = 1; /* Master thread waits for other threads to exit */ for (i = 0; i < num_workers; ++i) odph_odpthreads_join(&thread_tbl[i]); free(gbl_args->appl.if_names); free(gbl_args->appl.if_str); if (odp_pool_destroy(gbl_args->pool)) { printf("Error: pool destroy\n"); exit(EXIT_FAILURE); } if (odp_term_local()) { printf("Error: term local\n"); exit(EXIT_FAILURE); } if (odp_term_global(instance)) { printf("Error: term global\n"); exit(EXIT_FAILURE); } printf("Exit: %d\n\n", ret); return ret; }
/** * ODP packet example main function */ int main(int argc, char * argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_pool_t pool; int num_workers; int i; odp_shm_t shm; odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odp_pool_param_t params; odp_timer_pool_param_t tparams; odp_timer_pool_t tp; odp_pool_t tmop; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { EXAMPLE_ERR("Error: ODP global init failed.\n"); exit(EXIT_FAILURE); } if (odp_init_local(ODP_THREAD_CONTROL)) { EXAMPLE_ERR("Error: ODP local init failed.\n"); exit(EXIT_FAILURE); } my_sleep(1 + __k1_get_cluster_id() / 4); /* init counters */ odp_atomic_init_u64(&counters.seq, 0); odp_atomic_init_u64(&counters.ip, 0); odp_atomic_init_u64(&counters.udp, 0); odp_atomic_init_u64(&counters.icmp, 0); odp_atomic_init_u64(&counters.cnt, 0); /* Reserve memory for args from shared mem */ shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0); args = odp_shm_addr(shm); if (args == NULL) { EXAMPLE_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(args, 0, sizeof(*args)); /* Parse and store the application arguments */ parse_args(argc, argv, &args->appl); /* Print both system and application information */ print_info(NO_PATH(argv[0]), &args->appl); /* Default to system CPU count unless user specified */ num_workers = MAX_WORKERS; if (args->appl.cpu_count) num_workers = args->appl.cpu_count; num_workers = odp_cpumask_default_worker(&cpumask, num_workers); if (args->appl.mask) { odp_cpumask_from_str(&cpumask, args->appl.mask); num_workers = odp_cpumask_count(&cpumask); } (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr)); printf("num worker threads: %i\n", num_workers); printf("first CPU: %i\n", odp_cpumask_first(&cpumask)); printf("cpu mask: %s\n", cpumaskstr); /* ping mode needs two workers */ if (args->appl.mode == APPL_MODE_PING) { if (num_workers < 2) { EXAMPLE_ERR("Need at least two worker threads\n"); exit(EXIT_FAILURE); } else { num_workers = 2; } } /* Create packet pool */ odp_pool_param_init(&params); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; params.type = ODP_POOL_PACKET; pool = odp_pool_create("packet_pool", &params); if (pool == ODP_POOL_INVALID) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } odp_pool_print(pool); /* Create timer pool */ tparams.res_ns = 1 * ODP_TIME_MSEC_IN_NS; tparams.min_tmo = 0; tparams.max_tmo = 10000 * ODP_TIME_SEC_IN_NS; tparams.num_timers = num_workers; /* One timer per worker */ tparams.priv = 0; /* Shared */ tparams.clk_src = ODP_CLOCK_CPU; tp = odp_timer_pool_create("timer_pool", &tparams); if (tp == ODP_TIMER_POOL_INVALID) { EXAMPLE_ERR("Timer pool create failed.\n"); exit(EXIT_FAILURE); } odp_timer_pool_start(); /* Create timeout pool */ memset(&params, 0, sizeof(params)); params.tmo.num = tparams.num_timers; /* One timeout per timer */ params.type = ODP_POOL_TIMEOUT; tmop = odp_pool_create("timeout_pool", &params); if (tmop == ODP_POOL_INVALID) { EXAMPLE_ERR("Error: timeout pool create failed.\n"); exit(EXIT_FAILURE); } for (i = 0; i < args->appl.if_count; ++i) create_pktio(args->appl.if_names[i], pool); /* Create and init worker threads */ memset(thread_tbl, 0, sizeof(thread_tbl)); if (args->appl.mode == APPL_MODE_PING) { odp_cpumask_t cpu_mask; odp_queue_t tq; int cpu_first, cpu_next; odp_cpumask_zero(&cpu_mask); cpu_first = odp_cpumask_first(&cpumask); odp_cpumask_set(&cpu_mask, 
cpu_first); tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL); if (tq == ODP_QUEUE_INVALID) abort(); args->thread[1].pktio_dev = args->appl.if_names[0]; args->thread[1].pool = pool; args->thread[1].tp = tp; args->thread[1].tq = tq; args->thread[1].tim = odp_timer_alloc(tp, tq, NULL); if (args->thread[1].tim == ODP_TIMER_INVALID) abort(); args->thread[1].tmo_ev = odp_timeout_alloc(tmop); if (args->thread[1].tmo_ev == ODP_TIMEOUT_INVALID) abort(); args->thread[1].mode = args->appl.mode; odph_linux_pthread_create(&thread_tbl[1], &cpu_mask, gen_recv_thread, &args->thread[1], ODP_THREAD_WORKER); tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL); if (tq == ODP_QUEUE_INVALID) abort(); args->thread[0].pktio_dev = args->appl.if_names[0]; args->thread[0].pool = pool; args->thread[0].tp = tp; args->thread[0].tq = tq; args->thread[0].tim = odp_timer_alloc(tp, tq, NULL); if (args->thread[0].tim == ODP_TIMER_INVALID) abort(); args->thread[0].tmo_ev = odp_timeout_alloc(tmop); if (args->thread[0].tmo_ev == ODP_TIMEOUT_INVALID) abort(); args->thread[0].mode = args->appl.mode; cpu_next = odp_cpumask_next(&cpumask, cpu_first); odp_cpumask_zero(&cpu_mask); odp_cpumask_set(&cpu_mask, cpu_next); odph_linux_pthread_create(&thread_tbl[0], &cpu_mask, gen_send_thread, &args->thread[0], ODP_THREAD_WORKER); } else { int cpu = odp_cpumask_first(&cpumask); for (i = 0; i < num_workers; ++i) { odp_cpumask_t thd_mask; void *(*thr_run_func) (void *); int if_idx; odp_queue_t tq; if_idx = i % args->appl.if_count; args->thread[i].pktio_dev = args->appl.if_names[if_idx]; tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL); if (tq == ODP_QUEUE_INVALID) abort(); args->thread[i].pool = pool; args->thread[i].tp = tp; args->thread[i].tq = tq; args->thread[i].tim = odp_timer_alloc(tp, tq, NULL); if (args->thread[i].tim == ODP_TIMER_INVALID) abort(); args->thread[i].tmo_ev = odp_timeout_alloc(tmop); if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID) abort(); args->thread[i].mode = args->appl.mode; if (args->appl.mode == APPL_MODE_UDP) { thr_run_func = gen_send_thread; } else if (args->appl.mode == APPL_MODE_RCV) { thr_run_func = gen_recv_thread; } else { EXAMPLE_ERR("ERR MODE\n"); exit(EXIT_FAILURE); } /* * Create threads one-by-one instead of all-at-once, * because each thread might get different arguments. * Calls odp_thread_create(cpu) for each thread */ odp_cpumask_zero(&thd_mask); odp_cpumask_set(&thd_mask, cpu); odph_linux_pthread_create(&thread_tbl[i], &thd_mask, thr_run_func, &args->thread[i], ODP_THREAD_WORKER); cpu = odp_cpumask_next(&cpumask, cpu); } } print_global_stats(num_workers); /* Master thread waits for other threads to exit */ odph_linux_pthread_join(thread_tbl, num_workers); free(args->appl.if_names); free(args->appl.if_str); printf("Exit\n\n"); return 0; }
/** * ODP L2 forwarding main function */ int main(int argc, char *argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_pool_t pool; int i; int cpu; int num_workers; odp_shm_t shm; odp_cpumask_t cpumask; char cpumaskstr[ODP_CPUMASK_STR_SIZE]; odph_ethaddr_t new_addr; odp_pktio_t pktio; odp_pool_param_t params; int ret; stats_t *stats; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { LOG_ERR("Error: ODP global init failed.\n"); exit(EXIT_FAILURE); } /* Init this thread */ if (odp_init_local(ODP_THREAD_CONTROL)) { LOG_ERR("Error: ODP local init failed.\n"); exit(EXIT_FAILURE); } /* Reserve memory for args from shared mem */ shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE, 0); gbl_args = odp_shm_addr(shm); if (gbl_args == NULL) { LOG_ERR("Error: shared mem alloc failed.\n"); exit(EXIT_FAILURE); } memset(gbl_args, 0, sizeof(*gbl_args)); /* Parse and store the application arguments */ parse_args(argc, argv, &gbl_args->appl); /* Print both system and application information */ print_info(NO_PATH(argv[0]), &gbl_args->appl); /* Default to system CPU count unless user specified */ num_workers = MAX_WORKERS; if (gbl_args->appl.cpu_count) num_workers = gbl_args->appl.cpu_count; /* Get default worker cpumask */ num_workers = odp_cpumask_default_worker(&cpumask, num_workers); (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr)); printf("num worker threads: %i\n", num_workers); printf("first CPU: %i\n", odp_cpumask_first(&cpumask)); printf("cpu mask: %s\n", cpumaskstr); if (num_workers < gbl_args->appl.if_count) { LOG_ERR("Error: CPU count %d less than interface count\n", num_workers); exit(EXIT_FAILURE); } /* Create packet pool */ odp_pool_param_init(&params); params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE; params.pkt.len = SHM_PKT_POOL_BUF_SIZE; params.pkt.num = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; params.type = ODP_POOL_PACKET; pool = odp_pool_create("packet pool", &params); if (pool == ODP_POOL_INVALID) { LOG_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } odp_pool_print(pool); for (i = 0; i < gbl_args->appl.if_count; ++i) { pktio = create_pktio(gbl_args->appl.if_names[i], pool); if (pktio == ODP_PKTIO_INVALID) exit(EXIT_FAILURE); gbl_args->pktios[i] = pktio; /* Save interface ethernet address */ if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[i].addr, ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) { LOG_ERR("Error: interface ethernet address unknown\n"); exit(EXIT_FAILURE); } /* Save destination eth address */ if (gbl_args->appl.dst_change) { /* 02:00:00:00:00:XX */ memset(&new_addr, 0, sizeof(odph_ethaddr_t)); new_addr.addr[0] = 0x02; new_addr.addr[5] = i; gbl_args->dst_eth_addr[i] = new_addr; } /* Save interface destination port */ gbl_args->dst_port[i] = find_dest_port(i); } gbl_args->pktios[i] = ODP_PKTIO_INVALID; memset(thread_tbl, 0, sizeof(thread_tbl)); stats = gbl_args->stats; odp_barrier_init(&barrier, num_workers + 1); /* Create worker threads */ cpu = odp_cpumask_first(&cpumask); for (i = 0; i < num_workers; ++i) { odp_cpumask_t thd_mask; void *(*thr_run_func) (void *); if (gbl_args->appl.mode == DIRECT_RECV) thr_run_func = pktio_direct_recv_thread; else /* SCHED_NONE / SCHED_ATOMIC / SCHED_ORDERED */ thr_run_func = pktio_queue_thread; gbl_args->thread[i].src_idx = i % gbl_args->appl.if_count; gbl_args->thread[i].stats = &stats[i]; odp_cpumask_zero(&thd_mask); odp_cpumask_set(&thd_mask, cpu); odph_linux_pthread_create(&thread_tbl[i], &thd_mask, thr_run_func, &gbl_args->thread[i], ODP_THREAD_WORKER); cpu = 
odp_cpumask_next(&cpumask, cpu); } /* Start packet receive and transmit */ for (i = 0; i < gbl_args->appl.if_count; ++i) { pktio = gbl_args->pktios[i]; ret = odp_pktio_start(pktio); if (ret) { LOG_ERR("Error: unable to start %s\n", gbl_args->appl.if_names[i]); exit(EXIT_FAILURE); } } ret = print_speed_stats(num_workers, stats, gbl_args->appl.time, gbl_args->appl.accuracy); exit_threads = 1; /* Master thread waits for other threads to exit */ odph_linux_pthread_join(thread_tbl, num_workers); free(gbl_args->appl.if_names); free(gbl_args->appl.if_str); printf("Exit\n\n"); return ret; }