static void reset_queues(thread_args_t *args)
{
	int i, j, k;
	int num_prio = args->num_prio;
	int num_queues = args->num_queues;
	char name[32];

	for (i = 0; i < num_prio; i++) {
		for (j = 0; j < num_queues; j++) {
			odp_queue_t queue;

			snprintf(name, sizeof(name),
				 "sched_%d_%d_o", i, j);
			queue = odp_queue_lookup(name);
			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

			for (k = 0; k < args->num_bufs; k++) {
				queue_context *qctx =
					odp_queue_context(queue);
				int ndx;
				int ndx_max;

				ndx_max = odp_queue_lock_count(queue);
				CU_ASSERT_FATAL(ndx_max >= 0);
				qctx->sequence = 0;
				for (ndx = 0; ndx < ndx_max; ndx++)
					qctx->lock_sequence[ndx] = 0;
			}
		}
	}
}

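/*
 * The queue_context type is referenced throughout these tests but not
 * defined in this excerpt. A minimal sketch inferred from the fields the
 * tests touch (sequence, lock_sequence[], pq_handle, ctx_handle); the
 * real definition and the MAX_ORDERED_LOCKS bound may differ.
 */
#define MAX_ORDERED_LOCKS 2	/* hypothetical bound */

typedef struct {
	odp_buffer_t ctx_handle;	/* buffer backing this context */
	odp_queue_t  pq_handle;		/* plain queue for ordered output */
	uint64_t     sequence;		/* next sequence number to stamp */
	uint64_t     lock_sequence[MAX_ORDERED_LOCKS];
} queue_context;
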
void scheduler_test_pause_resume(void)
{
	odp_queue_t queue;
	odp_buffer_t buf;
	odp_event_t ev;
	odp_queue_t from;
	int i;
	int local_bufs = 0;

	queue = odp_queue_lookup("sched_0_0_n");
	CU_ASSERT(queue != ODP_QUEUE_INVALID);

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		ev = odp_buffer_to_event(buf);
		if (odp_queue_enq(queue, ev))
			odp_buffer_free(buf);
	}

	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
		from = ODP_QUEUE_INVALID;
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	odp_schedule_pause();

	while (1) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;

		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
		local_bufs++;
	}

	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);

	odp_schedule_resume();

	for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	CU_ASSERT(exit_schedule_loop() == 0);
}

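/*
 * exit_schedule_loop() is called by several tests above and below but is
 * not shown in this excerpt. A plausible sketch: pause scheduling, drain
 * and free any prescheduled events, resume, and report how many events
 * were dropped. The suite's real helper may differ in detail.
 */
static int exit_schedule_loop(void)
{
	odp_event_t ev;
	int ret = 0;

	odp_schedule_pause();

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT))
	       != ODP_EVENT_INVALID) {
		odp_event_free(ev);
		ret++;
	}

	odp_schedule_resume();

	return ret;
}
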
static int test_init(void)
{
	odp_pool_param_t params;
	odp_queue_param_t qparam;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];

	memset(&params, 0, sizeof(params));
	params.pkt.len     = PKT_HDR_LEN + gbl_args->args.pkt_len;
	params.pkt.seg_len = params.pkt.len;
	params.pkt.num     = PKT_BUF_NUM;
	params.type        = ODP_POOL_PACKET;

	transmit_pkt_pool = odp_pool_create("pkt_pool_transmit",
					    ODP_SHM_NULL, &params);
	if (transmit_pkt_pool == ODP_POOL_INVALID)
		LOG_ABORT("Failed to create transmit pool\n");

	odp_atomic_init_u32(&ip_seq, 0);
	odp_atomic_init_u32(&shutdown, 0);

	/* create pktios and associate input/output queues */
	gbl_args->pktio_tx = create_pktio(gbl_args->args.ifaces[0]);
	if (gbl_args->args.num_ifaces > 1)
		gbl_args->pktio_rx = create_pktio(gbl_args->args.ifaces[1]);
	else
		gbl_args->pktio_rx = gbl_args->pktio_tx;

	if (gbl_args->pktio_rx == ODP_PKTIO_INVALID ||
	    gbl_args->pktio_tx == ODP_PKTIO_INVALID) {
		LOG_ERR("failed to open pktio\n");
		return -1;
	}

	/* create and associate an input queue for the RX side */
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_NONE;
	qparam.sched.group = ODP_SCHED_GROUP_DEFAULT;

	snprintf(inq_name, sizeof(inq_name), "inq-pktio-%" PRIu64,
		 odp_pktio_to_u64(gbl_args->pktio_rx));
	inq_def = odp_queue_lookup(inq_name);
	if (inq_def == ODP_QUEUE_INVALID)
		inq_def = odp_queue_create(inq_name,
					   ODP_QUEUE_TYPE_PKTIN, &qparam);

	if (inq_def == ODP_QUEUE_INVALID)
		return -1;

	if (odp_pktio_inq_setdef(gbl_args->pktio_rx, inq_def) != 0)
		return -1;

	return 0;
}

static int destroy_queue(const char *name)
{
	odp_queue_t q;
	queue_context *qctx;

	q = odp_queue_lookup(name);

	if (q == ODP_QUEUE_INVALID)
		return -1;

	qctx = odp_queue_context(q);
	if (qctx)
		odp_buffer_free(qctx->ctx_handle);

	return odp_queue_destroy(q);
}

void queue_test_capa(void)
{
	odp_queue_capability_t capa;
	odp_queue_param_t qparams;
	char name[ODP_QUEUE_NAME_LEN];
	odp_queue_t queue[MAX_QUEUES];
	uint32_t num_queues, i;

	memset(&capa, 0, sizeof(odp_queue_capability_t));
	CU_ASSERT(odp_queue_capability(&capa) == 0);

	CU_ASSERT(capa.max_queues != 0);
	CU_ASSERT(capa.max_ordered_locks != 0);
	CU_ASSERT(capa.max_sched_groups != 0);
	CU_ASSERT(capa.sched_prios != 0);

	for (i = 0; i < ODP_QUEUE_NAME_LEN; i++)
		name[i] = 'A' + (i % 26);

	name[ODP_QUEUE_NAME_LEN - 1] = 0;

	if (capa.max_queues > MAX_QUEUES)
		num_queues = MAX_QUEUES;
	else
		num_queues = capa.max_queues;

	odp_queue_param_init(&qparams);

	for (i = 0; i < num_queues; i++) {
		generate_name(name, i);
		queue[i] = odp_queue_create(name, &qparams);

		if (queue[i] == ODP_QUEUE_INVALID) {
			CU_FAIL("Queue create failed");
			num_queues = i;
			break;
		}

		CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
	}

	for (i = 0; i < num_queues; i++)
		CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
}

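/*
 * generate_name() is assumed to make each queue name unique by encoding
 * the index into the leading characters of the shared name buffer. A
 * minimal sketch; the real helper may use a wider encoding.
 */
static void generate_name(char *name, uint32_t index)
{
	/* Overwrite the prefix with the index in base-26 letters */
	name[0] = 'A' + (index % 26);
	name[1] = 'A' + ((index / 26) % 26);
	name[2] = 'A' + ((index / (26 * 26)) % 26);
}
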
static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
{
	odp_queue_param_t qparam;
	odp_queue_t inq_def;
	char inq_name[ODP_QUEUE_NAME_LEN];

	odp_queue_param_init(&qparam);
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_ATOMIC;
	qparam.sched.group = ODP_SCHED_GROUP_ALL;

	snprintf(inq_name, sizeof(inq_name), "inq-pktio-%" PRIu64,
		 odp_pktio_to_u64(pktio));
	inq_def = odp_queue_lookup(inq_name);
	if (inq_def == ODP_QUEUE_INVALID)
		inq_def = odp_queue_create(
				inq_name,
				ODP_QUEUE_TYPE_PKTIN,
				qtype == ODP_QUEUE_TYPE_POLL ? NULL : &qparam);

	CU_ASSERT(inq_def != ODP_QUEUE_INVALID);

	return odp_pktio_inq_setdef(pktio, inq_def);
}

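/*
 * A matching teardown sketch for create_inq(): detach and destroy the
 * default input queue using the same API generation as above
 * (odp_pktio_inq_getdef/_remdef). Hypothetical helper; the suite's real
 * counterpart also drains any events still queued before destroying.
 */
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID)
		return -1;

	if (odp_pktio_inq_remdef(pktio))
		return -1;

	return odp_queue_destroy(inq);
}
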
static void fill_queues(thread_args_t *args)
{
	odp_schedule_sync_t sync;
	int num_queues, num_prio;
	odp_pool_t pool;
	int i, j, k;
	int buf_count = 0;
	test_globals_t *globals;
	char name[32];
	int ret;
	odp_buffer_t buf;
	odp_event_t ev;

	globals = args->globals;
	sync = args->sync;
	num_queues = args->num_queues;
	num_prio = args->num_prio;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	for (i = 0; i < num_prio; i++) {
		for (j = 0; j < num_queues; j++) {
			odp_queue_t queue;

			switch (sync) {
			case ODP_SCHED_SYNC_PARALLEL:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_n", i, j);
				break;
			case ODP_SCHED_SYNC_ATOMIC:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_a", i, j);
				break;
			case ODP_SCHED_SYNC_ORDERED:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_o", i, j);
				break;
			default:
				CU_ASSERT_FATAL(0);
				break;
			}

			queue = odp_queue_lookup(name);
			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

			for (k = 0; k < args->num_bufs; k++) {
				buf = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
				ev = odp_buffer_to_event(buf);
				if (sync == ODP_SCHED_SYNC_ORDERED) {
					queue_context *qctx =
						odp_queue_context(queue);
					buf_contents *bctx =
						odp_buffer_addr(buf);
					bctx->sequence = qctx->sequence++;
				}

				/* Non-fatal assert: on enqueue failure the
				 * buffer is freed below instead of counted.
				 * A fatal assert here would make the
				 * recovery branch unreachable. */
				ret = odp_queue_enq(queue, ev);
				CU_ASSERT(ret == 0);

				if (ret)
					odp_buffer_free(buf);
				else
					buf_count++;
			}
		}
	}

	globals->buf_count = buf_count;
	globals->buf_count_cpy = buf_count;
}

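/*
 * Sketch of the buf_contents payload stamped into each buffer above;
 * inferred from the two fields the tests use. The real struct may carry
 * more state.
 */
typedef struct {
	uint64_t sequence;		/* enqueue order within the queue */
	uint64_t output_sequence;	/* order observed at dispatch */
} buf_contents;
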
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy = odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] =
						odp_buffer_to_event(buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy != ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence =
					bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;

				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;

				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}

		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}

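/*
 * Hypothetical single-worker invocation of the harness above. The field
 * names follow the usages in fill_queues()/schedule_common_(); the
 * values are illustrative. A real multi-worker run would launch
 * schedule_common_ through the test framework's thread-create helper
 * after initializing globals->barrier and globals->lock.
 */
static void run_schedule_single(test_globals_t *globals)
{
	thread_args_t args;

	memset(&args, 0, sizeof(args));
	args.globals = globals;
	args.sync = ODP_SCHED_SYNC_ATOMIC;
	args.num_queues = 1;
	args.num_prio = 1;
	args.num_bufs = 100;
	args.num_workers = 1;
	args.enable_schd_multi = 0;	/* single-event odp_schedule() path */
	args.enable_excl_atomic = 0;

	fill_queues(&args);
	schedule_common_(&args);
}
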
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_t queue_grp1, queue_grp2;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;
	odp_schedule_group_info_t info;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Info struct */
	memset(&info, 0, sizeof(odp_schedule_group_info_t));
	rc = odp_schedule_group_info(mygrp1, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		odp_queue_param_t qp;
		odp_queue_t queue, from;
		odp_schedule_group_t mygrp[NUM_GROUPS];
		odp_queue_t queue_grp[NUM_GROUPS];
		int num = NUM_GROUPS;

		odp_queue_param_init(&qp);
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a queue in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Swap between two groups. Application should serve both
		 * groups to avoid potential head of line blocking in
		 * scheduler. */
		mygrp[0]     = mygrp1;
		mygrp[1]     = mygrp2;
		queue_grp[0] = queue_grp1;
		queue_grp[1] = queue_grp2;
		j = 0;

		/* Ensure that each test run starts from mygrp1 */
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);
		odp_schedule_group_join(mygrp1, &mymask);

		while (num) {
			queue = queue_grp[j];
			ev    = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID) {
				/* change group */
				rc = odp_schedule_group_leave(mygrp[j],
							      &mymask);
				CU_ASSERT_FATAL(rc == 0);

				j = (j + 1) % NUM_GROUPS;
				rc = odp_schedule_group_join(mygrp[j],
							     &mymask);
				CU_ASSERT_FATAL(rc == 0);
				continue;
			}

			CU_ASSERT_FATAL(from == queue);

			buf = odp_buffer_from_event(ev);
			u32 = odp_buffer_addr(buf);

			if (from == queue_grp1) {
				/* CU_ASSERT_FATAL needs these brackets */
				CU_ASSERT_FATAL(u32[0] == MAGIC1);
			} else {
				CU_ASSERT_FATAL(u32[0] == MAGIC2);
			}

			odp_buffer_free(buf);

			/* Tell scheduler we're about to request an event.
			 * Not needed, but a convenient place to test this
			 * API. */
			odp_schedule_prefetch(1);

			num--;
		}

		/* Release scheduler context and leave groups */
		odp_schedule_group_join(mygrp1, &mymask);
		odp_schedule_group_join(mygrp2, &mymask);
		CU_ASSERT(exit_schedule_loop() == 0);
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}

void queue_test_param(void)
{
	odp_queue_t queue;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	odp_queue_param_t qparams;
	odp_buffer_t enbuf;

	/* Schedule type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_SCHED;
	qparams.sched.prio  = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(odp_queue_to_u64(queue) !=
		  odp_queue_to_u64(ODP_QUEUE_INVALID));
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
	CU_ASSERT(ODP_SCHED_PRIO_LOWEST == odp_queue_sched_prio(queue));
	CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
	CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));

	CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
					     sizeof(queue_context)));

	CU_ASSERT(&queue_context == odp_queue_context(queue));
	CU_ASSERT(odp_queue_destroy(queue) == 0);

	/* Plain type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_PLAIN;
	qparams.context     = &queue_context;
	qparams.context_len = sizeof(queue_context);

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
	CU_ASSERT(&queue_context == odp_queue_context(queue));

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev = odp_buffer_to_event(buf);

	/* Store the result so the cleanup path is reachable; CU_ASSERT
	 * is a statement, not an expression, in standard CUnit */
	ret = odp_queue_enq(queue, ev);
	CU_ASSERT(ret == 0);
	if (ret) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT(ev == odp_queue_deq(queue));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret = odp_queue_deq_multi(queue, pev_tmp,
					      MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT(enev[i] == deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue) == 0);
}

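/*
 * The queue tests look up "msg_pool" but its creation is outside this
 * excerpt. A plausible suite-init sketch; the constants and buffer
 * size/count below are assumptions, not the suite's real values.
 */
#define MAX_BUFFER_QUEUE     8		/* hypothetical */
#define CONFIG_MAX_ITERATION 100	/* hypothetical */

static int queue_suite_init(void)
{
	odp_pool_param_t params;

	odp_pool_param_init(&params);
	params.type      = ODP_POOL_BUFFER;
	params.buf.size  = 64;
	params.buf.align = 0;
	params.buf.num   = 1024;

	if (odp_pool_create("msg_pool", &params) == ODP_POOL_INVALID)
		return -1;

	return 0;
}
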
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period = odp_timer_ns_to_tick(gbls->tp, period_ns);
	EXAMPLE_DBG(" [%i] period %" PRIu64 " ticks, %" PRIu64 " ns\n",
		    thr, period, period_ns);
	EXAMPLE_DBG(" [%i] current tick %" PRIu64 "\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}
		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %"
				      PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}
		EXAMPLE_DBG(" [%i] timeout, tick %" PRIu64 "\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");
	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}

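/*
 * timerset2str() is referenced by both variants of test_abs_timeouts()
 * but not defined in this excerpt. A minimal sketch mapping the
 * odp_timer_set_t return codes of this API generation to strings.
 */
static const char *timerset2str(odp_timer_set_t val)
{
	switch (val) {
	case ODP_TIMER_SUCCESS:
		return "success";
	case ODP_TIMER_TOOEARLY:
		return "too early";
	case ODP_TIMER_TOOLATE:
		return "too late";
	case ODP_TIMER_NOEVENT:
		return "no event";
	default:
		return "unknown";
	}
}
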
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;
	uint32_t num_workers = gbls->num_workers;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period = odp_timer_ns_to_tick(gbls->tp, period_ns);
	/* Use PRIu64 for the 64-bit tick values; plain %d here invoked
	 * undefined behavior */
	EXAMPLE_DBG(" [%i] period %" PRIu64 " ticks, %" PRIu64 " ns\n",
		    thr, period, period_ns);
	EXAMPLE_DBG(" [%i] current tick %" PRIu64 "\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while (1) {
		int wait = 0;
		odp_event_t ev;
		odp_timer_set_t rc;

		if (ttp) {
			tick += period;
			rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
			if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
				/* Too early or too late timeout requested */
				EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
					      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
			if ((++wait > WAIT_NUM) &&
			    (odp_atomic_load_u32(&gbls->remain) < num_workers))
				EXAMPLE_ABORT("At least one TMO was lost\n");
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));

		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo))
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed. Print the
			 * opaque handle via odp_timer_to_u64() instead of
			 * the original %x, which does not match the type */
			EXAMPLE_ABORT("Unexpected timeout received (timer %"
				      PRIu64 ", tick %" PRIu64 ")\n",
				      odp_timer_to_u64(ttp->tim), tick);

		EXAMPLE_DBG(" [%i] timeout, tick %" PRIu64 "\n", thr, tick);

		uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

		if (!rx_num)
			EXAMPLE_ABORT("Unexpected timeout received (timer %"
				      PRIu64 ", tick %" PRIu64 ")\n",
				      odp_timer_to_u64(ttp->tim), tick);
		else if (rx_num > num_workers)
			continue;

		odp_timeout_free(odp_timeout_from_event(ttp->ev));
		odp_timer_free(ttp->tim);
		ttp = NULL;
	}

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}

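/*
 * remove_prescheduled_events() is called by both variants above but is
 * not shown. A plausible sketch: pause the scheduler, free anything it
 * had already prescheduled to this thread, then resume.
 */
static void remove_prescheduled_events(void)
{
	odp_event_t ev;
	odp_queue_t queue;

	odp_schedule_pause();

	while ((ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT))
	       != ODP_EVENT_INVALID)
		odp_event_free(ev);

	odp_schedule_resume();
}
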
void queue_test_sunnydays(void)
{
	odp_queue_t queue_creat_id, queue_id;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	void *prtn = NULL;
	odp_queue_param_t qparams;

	odp_queue_param_init(&qparams);
	qparams.sched.prio  = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync  = ODP_SCHED_SYNC_NONE;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue_creat_id = odp_queue_create("test_queue",
					  ODP_QUEUE_TYPE_POLL, &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);
	CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
			odp_queue_type(queue_creat_id));

	queue_id = odp_queue_lookup("test_queue");
	CU_ASSERT_EQUAL(queue_creat_id, queue_id);

	CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
			odp_queue_sched_group(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST,
			odp_queue_sched_prio(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE,
			odp_queue_sched_type(queue_id));

	CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));

	prtn = odp_queue_context(queue_id);
	CU_ASSERT(&queue_contest == (int *)prtn);

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev = odp_buffer_to_event(buf);

	/* Store the result so the cleanup path is reachable; CU_ASSERT
	 * is a statement, not an expression, in standard CUnit */
	ret = odp_queue_enq(queue_id, ev);
	CU_ASSERT(ret == 0);
	if (ret) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT_EQUAL(ev, odp_queue_deq(queue_id));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t buf = odp_buffer_alloc(msg_pool);

		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue_id, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret = odp_queue_deq_multi(queue_id, pev_tmp,
					      MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t enbuf = odp_buffer_from_event(enev[i]);

		CU_ASSERT_EQUAL(enev[i], deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue_id) == 0);
}

void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue_grp1, queue_grp2, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < num_sync; i++) {
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a queue in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);

		/* Store the result so the cleanup path is reachable;
		 * CU_ASSERT is a statement, not an expression */
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Scheduler should give us the event from Group 2 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp2);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC2);

		odp_buffer_free(buf);

		/* Scheduler should not return anything now since we're
		 * not in Group 1 and Queue 2 is empty. Do this several
		 * times to confirm. */
		for (j = 0; j < 10; j++) {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
		}

		/* Now join group 1 and verify we can get the event */
		rc = odp_schedule_group_join(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* Tell scheduler we're about to request an event.
		 * Not needed, but a convenient place to test this API. */
		odp_schedule_prefetch(1);

		/* Now get the event from Queue 1 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp1);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC1);

		odp_buffer_free(buf);

		/* Leave group 1 for next pass */
		rc = odp_schedule_group_leave(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* We must release order before destroying queues */
		odp_schedule_release_ordered();

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1") ==
				ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2") ==
				ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}