int ofp_timer_cancel(odp_timer_t tim)
{
	odp_event_t timeout_event = ODP_EVENT_INVALID;
	odp_timeout_t tmo;
	uint32_t t = (uint32_t)tim;
	struct ofp_timer_internal *bufdata;
	struct ofp_timer_internal *prev = NULL;

	if (tim == ODP_TIMER_INVALID)
		return 0;

	if (t & 0x80000000) {
		/* long timeout */
		odp_spinlock_lock(&shm->lock);
		bufdata = shm->long_table[t & TIMER_LONG_MASK];

		while (bufdata) {
			struct ofp_timer_internal *next = bufdata->next;

			if (bufdata->id == t) {
				if (prev == NULL)
					shm->long_table[t & TIMER_LONG_MASK] =
						next;
				else
					prev->next = next;
				odp_buffer_free(bufdata->buf);
				odp_spinlock_unlock(&shm->lock);
				return 0;
			}
			prev = bufdata;
			bufdata = next;
		}
		odp_spinlock_unlock(&shm->lock);
		return -1;
	} else {
		if (odp_timer_cancel(tim, &timeout_event) < 0) {
			OFP_WARN("Timeout already expired or inactive");
			return 0;
		}

		if (timeout_event != ODP_EVENT_INVALID) {
			tmo = odp_timeout_from_event(timeout_event);
			bufdata = odp_timeout_user_ptr(tmo);
			odp_buffer_free(bufdata->buf);
			odp_timeout_free(tmo);
		} else {
			OFP_WARN("Lost timeout buffer at timer cancel");
			return -1;
		}

		if (odp_timer_free(tim) != ODP_EVENT_INVALID) {
			OFP_ERR("odp_timer_free failed");
			return -1;
		}
	}

	return 0;
}
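/*
 * Illustrative sketch, not from the original sources: a plausible layout
 * for struct ofp_timer_internal, reconstructed from the fields the
 * helpers in this section access (id, next, buf, t_ev, callback, arg).
 * Field order and the OFP_TIMER_ARG_LEN constant are assumptions made
 * for illustration only.
 */
struct ofp_timer_internal {
	struct ofp_timer_internal *next; /* chain in a shm->long_table[] slot */
	odp_buffer_t buf;                /* buffer carrying this struct */
	odp_event_t t_ev;                /* timeout event of a short timer */
	uint32_t id;                     /* slot index, generation, long flag */
	ofp_timer_callback callback;     /* user callback run on expiry */
	char arg[OFP_TIMER_ARG_LEN];     /* copied user argument */
};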
void scheduler_test_pause_resume(void)
{
	odp_queue_t queue;
	odp_buffer_t buf;
	odp_event_t ev;
	odp_queue_t from;
	int i;
	int local_bufs = 0;

	queue = odp_queue_lookup("sched_0_0_n");
	CU_ASSERT(queue != ODP_QUEUE_INVALID);

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	for (i = 0; i < NUM_BUFS_PAUSE; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		ev = odp_buffer_to_event(buf);
		if (odp_queue_enq(queue, ev))
			odp_buffer_free(buf);
	}

	for (i = 0; i < NUM_BUFS_BEFORE_PAUSE; i++) {
		from = ODP_QUEUE_INVALID;
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	odp_schedule_pause();

	while (1) {
		ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (ev == ODP_EVENT_INVALID)
			break;

		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
		local_bufs++;
	}

	CU_ASSERT(local_bufs < NUM_BUFS_PAUSE - NUM_BUFS_BEFORE_PAUSE);

	odp_schedule_resume();

	for (i = local_bufs + NUM_BUFS_BEFORE_PAUSE; i < NUM_BUFS_PAUSE; i++) {
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT(from == queue);
		buf = odp_buffer_from_event(ev);
		odp_buffer_free(buf);
	}

	CU_ASSERT(exit_schedule_loop() == 0);
}
/*
 * Should receive timeouts only
 */
static void *event_dispatcher(void *arg)
{
	odp_event_t ev;

	(void)arg;

	ofp_init_local();

	while (1) {
		ev = odp_schedule(NULL, ODP_SCHED_WAIT);

		if (ev == ODP_EVENT_INVALID)
			continue;

		if (odp_event_type(ev) == ODP_EVENT_TIMEOUT) {
			ofp_timer_handle(ev);
			continue;
		}

		OFP_ERR("Error: unexpected event type: %u\n",
			odp_event_type(ev));
		odp_buffer_free(odp_buffer_from_event(ev));
	}

	/* Never reached */
	return NULL;
}
static void notify_function(union sigval sigval)
{
	(void)sigval;
	uint64_t cur_tick;
	timeout_t *tmo;
	tick_t *tick;

	if (odp_timer.timer[0].active == 0)
		return;

	/* ODP_DBG("Tick\n"); */
	cur_tick = odp_timer.timer[0].cur_tick++;

	tick = &odp_timer.timer[0].tick[cur_tick % MAX_TICKS];

	while ((tmo = rem_tmo(tick)) != NULL) {
		odp_queue_t queue;
		odp_buffer_t buf;

		queue = tmo->queue;
		buf = tmo->buf;

		if (buf != tmo->tmo_buf)
			odp_buffer_free(tmo->tmo_buf);

		odp_queue_enq(queue, buf);
	}
}
void ofp_uma_pool_free(void *data)
{
	struct uma_pool_metadata *meta = (struct uma_pool_metadata *)
		((uint8_t *)data - sizeof(struct uma_pool_metadata));

	odp_buffer_free(meta->buffer_handle);
}
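/*
 * Hypothetical allocation counterpart, a sketch only: ofp_uma_pool_free()
 * above recovers its metadata by stepping back sizeof(struct
 * uma_pool_metadata) bytes from the user pointer, which implies the
 * allocator places the metadata header immediately before the user data.
 * The function name and the assumption that the metadata struct only
 * needs buffer_handle are illustrative, not from the original sources.
 */
static void *ofp_uma_pool_alloc_sketch(odp_pool_t pool)
{
	odp_buffer_t buf;
	struct uma_pool_metadata *meta;

	buf = odp_buffer_alloc(pool);
	if (buf == ODP_BUFFER_INVALID)
		return NULL;

	/* Metadata header first, user data directly behind it. */
	meta = (struct uma_pool_metadata *)odp_buffer_addr(buf);
	meta->buffer_handle = buf;

	return (uint8_t *)meta + sizeof(struct uma_pool_metadata);
}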
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;
	odp_queue_type_t q_type;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID)
		return -1;

	odp_pktio_inq_remdef(pktio);

	q_type = odp_queue_type(inq);

	/* flush any pending events */
	while (1) {
		if (q_type == ODP_QUEUE_TYPE_POLL)
			ev = odp_queue_deq(inq);
		else
			ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_buffer_free(odp_buffer_from_event(ev));
		else
			break;
	}

	return odp_queue_destroy(inq);
}
static int destroy_inq(odp_pktio_t pktio)
{
	odp_queue_t inq;
	odp_event_t ev;

	inq = odp_pktio_inq_getdef(pktio);

	if (inq == ODP_QUEUE_INVALID) {
		CU_FAIL("attempting to destroy invalid inq");
		return -1;
	}

	if (0 > odp_pktio_inq_remdef(pktio))
		return -1;

	while (1) {
		ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);

		if (ev != ODP_EVENT_INVALID)
			odp_buffer_free(odp_buffer_from_event(ev));
		else
			break;
	}

	return odp_queue_destroy(inq);
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int len)
{
	int i;

	for (i = 0; i < len; ++i)
		odp_buffer_free(buf[i]);
}
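/*
 * Usage sketch, illustration only (not from the original sources):
 * allocate a small batch from a pool and release everything that was
 * successfully allocated in one odp_buffer_free_multi() call. "pool" is
 * assumed to be a valid ODP_POOL_BUFFER pool handle.
 */
static void free_multi_example(odp_pool_t pool)
{
	odp_buffer_t bufs[8];
	int i, num = 0;

	for (i = 0; i < 8; i++) {
		bufs[num] = odp_buffer_alloc(pool);
		if (bufs[num] == ODP_BUFFER_INVALID)
			break;
		num++;
	}

	/* Frees only the num buffers that were actually allocated. */
	odp_buffer_free_multi(bufs, num);
}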
int odp_schedule_term_global(void)
{
	int ret = 0;
	int rc = 0;
	int i, j;

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		for (j = 0; j < QUEUES_PER_PRIO; j++) {
			odp_queue_t pri_q;
			odp_event_t ev;

			pri_q = sched->pri_queue[i][j];

			while ((ev = odp_queue_deq(pri_q)) !=
			       ODP_EVENT_INVALID) {
				odp_buffer_t buf;
				sched_cmd_t *sched_cmd;

				buf = odp_buffer_from_event(ev);
				sched_cmd = odp_buffer_addr(buf);

				if (sched_cmd->cmd == SCHED_CMD_DEQUEUE) {
					queue_entry_t *qe;
					odp_buffer_hdr_t *buf_hdr[1];
					int num;

					qe  = sched_cmd->qe;
					num = queue_deq_multi(qe, buf_hdr, 1);

					if (num < 0)
						queue_destroy_finalize(qe);

					if (num > 0)
						ODP_ERR("Queue not empty\n");
				} else
					odp_buffer_free(buf);
			}

			if (odp_queue_destroy(pri_q)) {
				ODP_ERR("Pri queue destroy fail.\n");
				rc = -1;
			}
		}
	}

	if (odp_pool_destroy(sched->pool) != 0) {
		ODP_ERR("Pool destroy fail.\n");
		rc = -1;
	}

	ret = odp_shm_free(sched->shm);
	if (ret < 0) {
		ODP_ERR("Shm free failed for odp_scheduler");
		rc = -1;
	}

	return rc;
}
void schedule_queue_destroy(queue_entry_t *qe)
{
	odp_buffer_t buf;

	buf = odp_buffer_from_event(qe->s.cmd_ev);
	odp_buffer_free(buf);

	pri_clr_queue(queue_handle(qe), queue_prio(qe));

	qe->s.cmd_ev    = ODP_EVENT_INVALID;
	qe->s.pri_queue = ODP_QUEUE_INVALID;
}
/*
 * During the shutdown process, all OFP submodules cancel their timers.
 * However, it is possible that a timer is cancelled (thus preventing further
 * expiration) while a timeout event from a previous expiration of the same
 * timer is still sitting in a queue, waiting to be scheduled.
 * schedule_shutdown() calls this function to clean up these events.
 */
void ofp_timer_evt_cleanup(odp_event_t ev)
{
	odp_timeout_t tmo;
	odp_timer_t tim;
	struct ofp_timer_internal *bufdata;

	tmo = odp_timeout_from_event(ev);
	tim = odp_timeout_timer(tmo);
	bufdata = (struct ofp_timer_internal *)odp_timeout_user_ptr(tmo);

	odp_buffer_free(bufdata->buf);
	odp_timeout_free(tmo);
	odp_timer_free(tim);
}
void ofp_timer_handle(odp_event_t ev)
{
	struct ofp_timer_internal *bufdata;
	odp_timeout_t tmo = odp_timeout_from_event(ev);
	odp_timer_t tim = odp_timeout_timer(tmo);

	bufdata = (struct ofp_timer_internal *)odp_timeout_user_ptr(tmo);
	fflush(NULL);
	bufdata->callback(&bufdata->arg);

	odp_buffer_free(bufdata->buf);
	odp_timeout_free(tmo);
	odp_timer_free(tim);
}
int ofp_timer_term_global(void)
{
	int i;
	struct ofp_timer_internal *bufdata, *next;
	int rc = 0;

	if (ofp_timer_lookup_shared_memory())
		return -1;

	CHECK_ERROR(ofp_timer_stop_global(), rc);

	/* Cleanup long timers */
	for (i = 0; i < TIMER_NUM_LONG_SLOTS; i++) {
		bufdata = shm->long_table[i];
		if (!bufdata)
			continue;

		while (bufdata) {
			next = bufdata->next;
			odp_buffer_free(bufdata->buf);
			bufdata = next;
		}
	}

	/* Cleanup timer related ODP objects */
	if (shm->queue != ODP_QUEUE_INVALID) {
		CHECK_ERROR(odp_queue_destroy(shm->queue), rc);
		shm->queue = ODP_QUEUE_INVALID;
	}

	if (shm->socket_timer_pool != ODP_TIMER_POOL_INVALID) {
		odp_timer_pool_destroy(shm->socket_timer_pool);
		shm->socket_timer_pool = ODP_TIMER_POOL_INVALID;
	}

	if (shm->buf_pool != ODP_POOL_INVALID) {
		CHECK_ERROR(odp_pool_destroy(shm->buf_pool), rc);
		shm->buf_pool = ODP_POOL_INVALID;
	}

	if (shm->pool != ODP_POOL_INVALID) {
		CHECK_ERROR(odp_pool_destroy(shm->pool), rc);
		shm->pool = ODP_POOL_INVALID;
	}

	CHECK_ERROR(ofp_timer_free_shared_memory(), rc);

	return rc;
}
static int destroy_queue(const char *name)
{
	odp_queue_t q;
	queue_context *qctx;

	q = odp_queue_lookup(name);

	if (q == ODP_QUEUE_INVALID)
		return -1;

	qctx = odp_queue_context(q);
	if (qctx)
		odp_buffer_free(qctx->ctx_handle);

	return odp_queue_destroy(q);
}
static void *run_thread_rx(void *arg)
{
	test_globals_t *globals;
	int thr_id, batch_len;
	odp_queue_t pollq = ODP_QUEUE_INVALID;
	thread_args_t *targs = arg;

	batch_len = targs->batch_len;

	if (batch_len > BATCH_LEN_MAX)
		batch_len = BATCH_LEN_MAX;

	thr_id = odp_thread_id();

	globals = odp_shm_addr(odp_shm_lookup("test_globals"));

	pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];

	if (gbl_args->args.schedule == 0) {
		pollq = odp_pktio_inq_getdef(globals->pktio_rx);
		if (pollq == ODP_QUEUE_INVALID)
			LOG_ABORT("Invalid input queue.\n");
	}

	odp_barrier_wait(&globals->rx_barrier);

	while (1) {
		odp_event_t ev[BATCH_LEN_MAX];
		int i, n_ev;

		n_ev = receive_packets(pollq, ev, batch_len);

		for (i = 0; i < n_ev; ++i) {
			if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
				odp_packet_t pkt =
					odp_packet_from_event(ev[i]);

				if (pktio_pkt_has_magic(pkt))
					stats->s.rx_cnt++;
				else
					stats->s.rx_ignore++;
			}
			odp_buffer_free(odp_buffer_from_event(ev[i]));
		}
		if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
			break;
	}

	return NULL;
}
void odp_event_free(odp_event_t event)
{
	switch (odp_event_type(event)) {
	case ODP_EVENT_BUFFER:
		odp_buffer_free(odp_buffer_from_event(event));
		break;
	case ODP_EVENT_PACKET:
		odp_packet_free(odp_packet_from_event(event));
		break;
	case ODP_EVENT_TIMEOUT:
		odp_timeout_free(odp_timeout_from_event(event));
		break;
	case ODP_EVENT_CRYPTO_COMPL:
		odp_crypto_compl_free(odp_crypto_compl_from_event(event));
		break;
	default:
		ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
	}
}
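/*
 * Usage sketch, illustration only (not from the original sources):
 * because odp_event_free() dispatches on the event type itself, a generic
 * drain loop can free whatever the scheduler returns without an explicit
 * per-type switch. Note that schedule_shutdown() further below still
 * switches on the type, because OFP timeout events need extra per-timer
 * cleanup via ofp_timer_evt_cleanup().
 */
static void drain_scheduler_example(void)
{
	odp_event_t ev;

	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) !=
	       ODP_EVENT_INVALID)
		odp_event_free(ev);
}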
static void one_sec(void *arg)
{
	struct ofp_timer_internal *bufdata;

	(void)arg;

	odp_spinlock_lock(&shm->lock);
	shm->sec_counter = (shm->sec_counter + 1) & TIMER_LONG_MASK;
	bufdata = shm->long_table[shm->sec_counter];
	shm->long_table[shm->sec_counter] = NULL;
	odp_spinlock_unlock(&shm->lock);

	while (bufdata) {
		struct ofp_timer_internal *next = bufdata->next;

		bufdata->callback(&bufdata->arg);
		odp_buffer_free(bufdata->buf);
		bufdata = next;
	}

	/* Start one second timeout */
	shm->timer_1s = ofp_timer_start(1000000UL, one_sec, NULL, 0);
}
static void schedule_shutdown(void)
{
	odp_event_t evt;
	odp_queue_t from;

	while (1) {
		evt = odp_schedule(&from, ODP_SCHED_NO_WAIT);
		if (evt == ODP_EVENT_INVALID)
			break;

		switch (odp_event_type(evt)) {
		case ODP_EVENT_TIMEOUT:
			ofp_timer_evt_cleanup(evt);
			break;
		case ODP_EVENT_PACKET:
			odp_packet_free(odp_packet_from_event(evt));
			break;
		case ODP_EVENT_BUFFER:
			odp_buffer_free(odp_buffer_from_event(evt));
			break;
		case ODP_EVENT_CRYPTO_COMPL:
			odp_crypto_compl_free(
				odp_crypto_compl_from_event(evt));
			break;
		}
	}

	odp_schedule_pause();
}
static void fill_queues(thread_args_t *args)
{
	odp_schedule_sync_t sync;
	int num_queues, num_prio;
	odp_pool_t pool;
	int i, j, k;
	int buf_count = 0;
	test_globals_t *globals;
	char name[32];
	int ret;
	odp_buffer_t buf;
	odp_event_t ev;

	globals = args->globals;
	sync = args->sync;
	num_queues = args->num_queues;
	num_prio = args->num_prio;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	for (i = 0; i < num_prio; i++) {
		for (j = 0; j < num_queues; j++) {
			odp_queue_t queue;

			switch (sync) {
			case ODP_SCHED_SYNC_PARALLEL:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_n", i, j);
				break;
			case ODP_SCHED_SYNC_ATOMIC:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_a", i, j);
				break;
			case ODP_SCHED_SYNC_ORDERED:
				snprintf(name, sizeof(name),
					 "sched_%d_%d_o", i, j);
				break;
			default:
				CU_ASSERT_FATAL(0);
				break;
			}

			queue = odp_queue_lookup(name);
			CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

			for (k = 0; k < args->num_bufs; k++) {
				buf = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
				ev = odp_buffer_to_event(buf);

				if (sync == ODP_SCHED_SYNC_ORDERED) {
					queue_context *qctx =
						odp_queue_context(queue);
					buf_contents *bctx =
						odp_buffer_addr(buf);
					bctx->sequence = qctx->sequence++;
				}

				ret = odp_queue_enq(queue, ev);
				CU_ASSERT_FATAL(ret == 0);

				if (ret)
					odp_buffer_free(buf);
				else
					buf_count++;
			}
		}
	}

	globals->buf_count = buf_count;
	globals->buf_count_cpy = buf_count;
}
static int schedule_common_(void *arg)
{
	thread_args_t *args = (thread_args_t *)arg;
	odp_schedule_sync_t sync;
	test_globals_t *globals;
	queue_context *qctx;
	buf_contents *bctx, *bctx_cpy;
	odp_pool_t pool;
	int locked;
	int num;
	odp_event_t ev;
	odp_buffer_t buf, buf_cpy;
	odp_queue_t from;

	globals = args->globals;
	sync = args->sync;

	pool = odp_pool_lookup(MSG_POOL_NAME);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	while (1) {
		from = ODP_QUEUE_INVALID;
		num = 0;

		odp_ticketlock_lock(&globals->lock);
		if (globals->buf_count == 0) {
			odp_ticketlock_unlock(&globals->lock);
			break;
		}
		odp_ticketlock_unlock(&globals->lock);

		if (args->enable_schd_multi) {
			odp_event_t events[BURST_BUF_SIZE],
				ev_cpy[BURST_BUF_SIZE];
			odp_buffer_t buf_cpy[BURST_BUF_SIZE];
			int j;

			num = odp_schedule_multi(&from, ODP_SCHED_NO_WAIT,
						 events, BURST_BUF_SIZE);
			CU_ASSERT(num >= 0);
			CU_ASSERT(num <= BURST_BUF_SIZE);
			if (num == 0)
				continue;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);

				for (j = 0; j < num; j++) {
					bctx = odp_buffer_addr(
						odp_buffer_from_event
						(events[j]));

					buf_cpy[j] = odp_buffer_alloc(pool);
					CU_ASSERT_FATAL(buf_cpy[j] !=
							ODP_BUFFER_INVALID);
					bctx_cpy =
						odp_buffer_addr(buf_cpy[j]);
					memcpy(bctx_cpy, bctx,
					       sizeof(buf_contents));
					bctx_cpy->output_sequence =
						bctx_cpy->sequence;
					ev_cpy[j] = odp_buffer_to_event(
						buf_cpy[j]);
				}

				rc = odp_queue_enq_multi(qctx->pq_handle,
							 ev_cpy, num);
				CU_ASSERT(rc == num);

				bctx = odp_buffer_addr(
					odp_buffer_from_event(events[0]));
				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			for (j = 0; j < num; j++)
				odp_event_free(events[j]);
		} else {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			if (ev == ODP_EVENT_INVALID)
				continue;

			buf = odp_buffer_from_event(ev);
			num = 1;

			if (sync == ODP_SCHED_SYNC_ORDERED) {
				int ndx;
				int ndx_max;
				int rc;

				ndx_max = odp_queue_lock_count(from);
				CU_ASSERT_FATAL(ndx_max >= 0);

				qctx = odp_queue_context(from);
				bctx = odp_buffer_addr(buf);
				buf_cpy = odp_buffer_alloc(pool);
				CU_ASSERT_FATAL(buf_cpy !=
						ODP_BUFFER_INVALID);
				bctx_cpy = odp_buffer_addr(buf_cpy);
				memcpy(bctx_cpy, bctx, sizeof(buf_contents));
				bctx_cpy->output_sequence =
					bctx_cpy->sequence;

				rc = odp_queue_enq(qctx->pq_handle,
						   odp_buffer_to_event
						   (buf_cpy));
				CU_ASSERT(rc == 0);

				for (ndx = 0; ndx < ndx_max; ndx++) {
					odp_schedule_order_lock(ndx);
					CU_ASSERT(bctx->sequence ==
						  qctx->lock_sequence[ndx]);
					qctx->lock_sequence[ndx] += num;
					odp_schedule_order_unlock(ndx);
				}
			}

			odp_buffer_free(buf);
		}

		if (args->enable_excl_atomic) {
			locked = odp_spinlock_trylock(&globals->atomic_lock);
			CU_ASSERT(locked != 0);
			CU_ASSERT(from != ODP_QUEUE_INVALID);
			if (locked) {
				int cnt;
				odp_time_t time = ODP_TIME_NULL;

				/* Do some work here to keep the thread busy */
				for (cnt = 0; cnt < 1000; cnt++)
					time = odp_time_sum(time,
							    odp_time_local());

				odp_spinlock_unlock(&globals->atomic_lock);
			}
		}

		if (sync == ODP_SCHED_SYNC_ATOMIC)
			odp_schedule_release_atomic();

		if (sync == ODP_SCHED_SYNC_ORDERED)
			odp_schedule_release_ordered();

		odp_ticketlock_lock(&globals->lock);

		globals->buf_count -= num;

		if (globals->buf_count < 0) {
			odp_ticketlock_unlock(&globals->lock);
			CU_FAIL_FATAL("Buffer counting failed");
		}

		odp_ticketlock_unlock(&globals->lock);
	}

	if (args->num_workers > 1)
		odp_barrier_wait(&globals->barrier);

	if (sync == ODP_SCHED_SYNC_ORDERED)
		locked = odp_ticketlock_trylock(&globals->lock);
	else
		locked = 0;

	if (locked && globals->buf_count_cpy > 0) {
		odp_event_t ev;
		odp_queue_t pq;
		uint64_t seq;
		uint64_t bcount = 0;
		int i, j;
		char name[32];
		uint64_t num_bufs = args->num_bufs;
		uint64_t buf_count = globals->buf_count_cpy;

		for (i = 0; i < args->num_prio; i++) {
			for (j = 0; j < args->num_queues; j++) {
				snprintf(name, sizeof(name),
					 "plain_%d_%d_o", i, j);
				pq = odp_queue_lookup(name);
				CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);

				seq = 0;

				while (1) {
					ev = odp_queue_deq(pq);

					if (ev == ODP_EVENT_INVALID) {
						CU_ASSERT(seq == num_bufs);
						break;
					}

					bctx = odp_buffer_addr(
						odp_buffer_from_event(ev));

					CU_ASSERT(bctx->sequence == seq);
					seq++;
					bcount++;
					odp_event_free(ev);
				}
			}
		}
		CU_ASSERT(bcount == buf_count);
		globals->buf_count_cpy = 0;
	}

	if (locked)
		odp_ticketlock_unlock(&globals->lock);

	/* Clear scheduler atomic / ordered context between tests */
	num = exit_schedule_loop();

	CU_ASSERT(num == 0);

	if (num)
		printf("\nDROPPED %i events\n\n", num);

	return 0;
}
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_t queue_grp1, queue_grp2;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;
	odp_schedule_group_info_t info;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Info struct */
	memset(&info, 0, sizeof(odp_schedule_group_info_t));
	rc = odp_schedule_group_info(mygrp1, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_equal(&info.thrmask, &mymask) != 0);
	CU_ASSERT(strcmp(info.name, "Test Group 1") == 0);

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		odp_queue_param_t qp;
		odp_queue_t queue, from;
		odp_schedule_group_t mygrp[NUM_GROUPS];
		odp_queue_t queue_grp[NUM_GROUPS];
		int num = NUM_GROUPS;

		odp_queue_param_init(&qp);
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1",
					      &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp1, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2",
					      &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		rc = odp_queue_enq(queue_grp2, ev);
		CU_ASSERT(rc == 0);
		if (rc)
			odp_buffer_free(buf);

		/* Swap between two groups. Application should serve both
		 * groups to avoid potential head of line blocking in
		 * scheduler. */
		mygrp[0] = mygrp1;
		mygrp[1] = mygrp2;
		queue_grp[0] = queue_grp1;
		queue_grp[1] = queue_grp2;
		j = 0;

		/* Ensure that each test run starts from mygrp1 */
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);
		odp_schedule_group_join(mygrp1, &mymask);

		while (num) {
			queue = queue_grp[j];
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);

			if (ev == ODP_EVENT_INVALID) {
				/* change group */
				rc = odp_schedule_group_leave(mygrp[j],
							      &mymask);
				CU_ASSERT_FATAL(rc == 0);

				j = (j + 1) % NUM_GROUPS;
				rc = odp_schedule_group_join(mygrp[j],
							     &mymask);
				CU_ASSERT_FATAL(rc == 0);
				continue;
			}

			CU_ASSERT_FATAL(from == queue);

			buf = odp_buffer_from_event(ev);
			u32 = odp_buffer_addr(buf);

			if (from == queue_grp1) {
				/* CU_ASSERT_FATAL needs these brackets */
				CU_ASSERT_FATAL(u32[0] == MAGIC1);
			} else {
				CU_ASSERT_FATAL(u32[0] == MAGIC2);
			}

			odp_buffer_free(buf);

			/* Tell scheduler we're about to request an event.
			 * Not needed, but a convenient place to test
			 * this API. */
			odp_schedule_prefetch(1);

			num--;
		}

		/* Release scheduler context and leave groups */
		odp_schedule_group_join(mygrp1, &mymask);
		odp_schedule_group_join(mygrp2, &mymask);
		CU_ASSERT(exit_schedule_loop() == 0);
		odp_schedule_group_leave(mygrp1, &mymask);
		odp_schedule_group_leave(mygrp2, &mymask);

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1")
				== ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2")
				== ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
void scheduler_test_queue_destroy(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
				      ODP_SCHED_SYNC_ATOMIC,
				      ODP_SCHED_SYNC_ORDERED};

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 1;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_destroy_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < 3; i++) {
		qp.type        = ODP_QUEUE_TYPE_SCHED;
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = ODP_SCHED_GROUP_ALL;

		queue = odp_queue_create("sched_destroy_queue", &qp);

		CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);

		buf = odp_buffer_alloc(p);

		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0)))
			odp_buffer_free(buf);

		ev = odp_schedule(&from, ODP_SCHED_WAIT);

		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);

		CU_ASSERT_FATAL(from == queue);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC);

		odp_buffer_free(buf);
		odp_schedule_release_ordered();

		CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
	}

	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
void scheduler_test_groups(void)
{
	odp_pool_t p;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_queue_t queue_grp1, queue_grp2, from;
	odp_buffer_t buf;
	odp_event_t ev;
	uint32_t *u32;
	int i, j, rc;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	int thr_id = odp_thread_id();
	odp_thrmask_t zeromask, mymask, testmask;
	odp_schedule_group_t mygrp1, mygrp2, lookup;

	odp_thrmask_zero(&zeromask);
	odp_thrmask_zero(&mymask);
	odp_thrmask_set(&mymask, thr_id);

	/* Can't find a group before we create it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create the group */
	mygrp1 = odp_schedule_group_create("Test Group 1", &zeromask);
	CU_ASSERT_FATAL(mygrp1 != ODP_SCHED_GROUP_INVALID);

	/* Verify we can now find it */
	lookup = odp_schedule_group_lookup("Test Group 1");
	CU_ASSERT(lookup == mygrp1);

	/* Threadmask should be retrievable and be what we expect */
	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* We can't join or leave an unknown group */
	rc = odp_schedule_group_join(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	rc = odp_schedule_group_leave(ODP_SCHED_GROUP_INVALID, &mymask);
	CU_ASSERT(rc != 0);

	/* But we can leave our group */
	rc = odp_schedule_group_leave(mygrp1, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp1, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* We shouldn't be able to find our second group before creating it */
	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == ODP_SCHED_GROUP_INVALID);

	/* Now create it and verify we can find it */
	mygrp2 = odp_schedule_group_create("Test Group 2", &zeromask);
	CU_ASSERT_FATAL(mygrp2 != ODP_SCHED_GROUP_INVALID);

	lookup = odp_schedule_group_lookup("Test Group 2");
	CU_ASSERT(lookup == mygrp2);

	/* Verify we're not part of it */
	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!odp_thrmask_isset(&testmask, thr_id));

	/* Now join the group and verify we're part of it */
	rc = odp_schedule_group_join(mygrp2, &mymask);
	CU_ASSERT(rc == 0);

	rc = odp_schedule_group_thrmask(mygrp2, &testmask);
	CU_ASSERT(rc == 0);
	CU_ASSERT(odp_thrmask_isset(&testmask, thr_id));

	/* Now verify scheduler adherence to groups */
	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size  = 100;
	params.buf.align = 0;
	params.buf.num   = 2;
	params.type      = ODP_POOL_BUFFER;

	p = odp_pool_create("sched_group_pool", &params);

	CU_ASSERT_FATAL(p != ODP_POOL_INVALID);

	for (i = 0; i < num_sync; i++) {
		qp.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
		qp.sched.sync  = sync[i];
		qp.sched.group = mygrp1;

		/* Create and populate a group in group 1 */
		queue_grp1 = odp_queue_create("sched_group_test_queue_1",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC1;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp1, ev) == 0)))
			odp_buffer_free(buf);

		/* Now create and populate a queue in group 2 */
		qp.sched.group = mygrp2;
		queue_grp2 = odp_queue_create("sched_group_test_queue_2",
					      ODP_QUEUE_TYPE_SCHED, &qp);
		CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);

		buf = odp_buffer_alloc(p);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);

		u32 = odp_buffer_addr(buf);
		u32[0] = MAGIC2;

		ev = odp_buffer_to_event(buf);
		if (!(CU_ASSERT(odp_queue_enq(queue_grp2, ev) == 0)))
			odp_buffer_free(buf);

		/* Scheduler should give us the event from Group 2 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp2);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC2);

		odp_buffer_free(buf);

		/* Scheduler should not return anything now since we're
		 * not in Group 1 and Queue 2 is empty. Do this several
		 * times to confirm. */
		for (j = 0; j < 10; j++) {
			ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
			CU_ASSERT_FATAL(ev == ODP_EVENT_INVALID);
		}

		/* Now join group 1 and verify we can get the event */
		rc = odp_schedule_group_join(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* Tell scheduler we're about to request an event.
		 * Not needed, but a convenient place to test this API. */
		odp_schedule_prefetch(1);

		/* Now get the event from Queue 1 */
		ev = odp_schedule(&from, ODP_SCHED_WAIT);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		CU_ASSERT_FATAL(from == queue_grp1);

		buf = odp_buffer_from_event(ev);
		u32 = odp_buffer_addr(buf);

		CU_ASSERT_FATAL(u32[0] == MAGIC1);

		odp_buffer_free(buf);

		/* Leave group 1 for next pass */
		rc = odp_schedule_group_leave(mygrp1, &mymask);
		CU_ASSERT_FATAL(rc == 0);

		/* We must release order before destroying queues */
		odp_schedule_release_ordered();

		/* Done with queues for this round */
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp1) == 0);
		CU_ASSERT_FATAL(odp_queue_destroy(queue_grp2) == 0);

		/* Verify we can no longer find our queues */
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_1")
				== ODP_QUEUE_INVALID);
		CU_ASSERT_FATAL(odp_queue_lookup("sched_group_test_queue_2")
				== ODP_QUEUE_INVALID);
	}

	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp1) == 0);
	CU_ASSERT_FATAL(odp_schedule_group_destroy(mygrp2) == 0);
	CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
void queue_test_param(void)
{
	odp_queue_t queue;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	odp_queue_param_t qparams;
	odp_buffer_t enbuf;

	/* Schedule type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_SCHED;
	qparams.sched.prio  = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync  = ODP_SCHED_SYNC_PARALLEL;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(odp_queue_to_u64(queue) !=
		  odp_queue_to_u64(ODP_QUEUE_INVALID));
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
	CU_ASSERT(ODP_SCHED_PRIO_LOWEST == odp_queue_sched_prio(queue));
	CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
	CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));

	CU_ASSERT(0 == odp_queue_context_set(queue, &queue_context,
					     sizeof(queue_context)));
	CU_ASSERT(&queue_context == odp_queue_context(queue));
	CU_ASSERT(odp_queue_destroy(queue) == 0);

	/* Plain type queue */
	odp_queue_param_init(&qparams);
	qparams.type        = ODP_QUEUE_TYPE_PLAIN;
	qparams.context     = &queue_context;
	qparams.context_len = sizeof(queue_context);

	queue = odp_queue_create("test_queue", &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue);
	CU_ASSERT(queue == odp_queue_lookup("test_queue"));
	CU_ASSERT(ODP_QUEUE_TYPE_PLAIN == odp_queue_type(queue));
	CU_ASSERT(&queue_context == odp_queue_context(queue));

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT(ev == odp_queue_deq(queue));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		buf = odp_buffer_alloc(msg_pool);
		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret = odp_queue_deq_multi(queue, pev_tmp,
					      MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		enbuf = odp_buffer_from_event(enev[i]);
		CU_ASSERT(enev[i] == deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue) == 0);
}
/*
 * Schedule queues
 *
 * TODO: SYNC_ORDERED not implemented yet
 */
static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
		    unsigned int max_num, unsigned int max_deq)
{
	int i, j;
	int thr;
	int ret;

	if (sched_local.num) {
		ret = copy_events(out_ev, max_num);

		if (out_queue)
			*out_queue = queue_handle(sched_local.qe);

		return ret;
	}

	odp_schedule_release_atomic();

	if (odp_unlikely(sched_local.pause))
		return 0;

	thr = odp_thread_id();

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		int id;

		if (sched->pri_mask[i] == 0)
			continue;

		id = thr & (QUEUES_PER_PRIO - 1);

		for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
			odp_queue_t pri_q;
			odp_event_t ev;
			odp_buffer_t buf;
			sched_cmd_t *sched_cmd;
			queue_entry_t *qe;
			int num;

			if (id >= QUEUES_PER_PRIO)
				id = 0;

			if (odp_unlikely((sched->pri_mask[i] &
					  (1 << id)) == 0))
				continue;

			pri_q = sched->pri_queue[i][id];
			ev    = odp_queue_deq(pri_q);
			buf   = odp_buffer_from_event(ev);

			if (buf == ODP_BUFFER_INVALID)
				continue;

			sched_cmd = odp_buffer_addr(buf);

			if (sched_cmd->cmd == SCHED_CMD_POLL_PKTIN) {
				/* Poll packet input */
				if (pktin_poll(sched_cmd->pe)) {
					/* Stop scheduling the pktio */
					pri_clr_pktio(sched_cmd->pktio,
						      sched_cmd->prio);
					odp_buffer_free(buf);
				} else {
					/* Continue scheduling the pktio */
					if (odp_queue_enq(pri_q, ev))
						ODP_ABORT("schedule failed\n");
				}
				continue;
			}

			qe  = sched_cmd->qe;
			num = queue_deq_multi(qe, sched_local.buf_hdr,
					      max_deq);

			if (num < 0) {
				/* Destroyed queue */
				queue_destroy_finalize(qe);
				continue;
			}

			if (num == 0) {
				/* Remove empty queue from scheduling */
				continue;
			}

			sched_local.num   = num;
			sched_local.index = 0;
			sched_local.qe    = qe;
			ret = copy_events(out_ev, max_num);

			if (queue_is_atomic(qe)) {
				/* Hold queue during atomic access */
				sched_local.pri_queue = pri_q;
				sched_local.cmd_ev    = ev;
			} else {
				/* Continue scheduling the queue */
				if (odp_queue_enq(pri_q, ev))
					ODP_ABORT("schedule failed\n");
			}

			/* Output the source queue handle */
			if (out_queue)
				*out_queue = queue_handle(qe);

			return ret;
		}
	}

	return 0;
}
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
			    void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long 1 s resolution timeout */
		uint64_t sec = tmo_us / 1000000UL;

		if (sec > TIMER_NUM_LONG_SLOTS) {
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;

		bufdata->id = ((shm->id++) << TIMER_LONG_SHIFT) | ix |
			      0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);

		return (odp_timer_t)bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

		period_ns = tmo_us * ODP_TIME_USEC_IN_NS;
		period = odp_timer_ns_to_tick(shm->socket_timer_pool,
					      period_ns);
		tick = odp_timer_current_tick(shm->socket_timer_pool);
		tick += period;

		timer = odp_timer_alloc(shm->socket_timer_pool, shm->queue,
					bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);

		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}

	return ODP_TIMER_INVALID;
}
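/*
 * Usage sketch, illustration only (not from the original sources): arm a
 * one-shot OFP timer and cancel it before it fires. my_cb, my_arg_t and
 * timer_usage_example are hypothetical names; the argument must fit the
 * arg area of struct ofp_timer_internal, since ofp_timer_start() copies
 * arglen bytes into it.
 */
struct my_arg_t {
	int value;
};

static void my_cb(void *arg)
{
	struct my_arg_t *a = arg;

	OFP_INFO("timer fired, value=%d", a->value);
}

static void timer_usage_example(void)
{
	struct my_arg_t arg = { .value = 42 };
	odp_timer_t tim;

	/* 500 ms one-shot timeout; the argument is copied by value. */
	tim = ofp_timer_start(500000UL, my_cb, &arg, sizeof(arg));
	if (tim == ODP_TIMER_INVALID)
		return;

	/* ... */

	/* Cancel; this frees the user buffer if the timer had not fired. */
	if (ofp_timer_cancel(tim))
		OFP_ERR("ofp_timer_cancel failed");
}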
void queue_test_sunnydays(void)
{
	odp_queue_t queue_creat_id, queue_id;
	odp_event_t enev[MAX_BUFFER_QUEUE];
	odp_event_t deev[MAX_BUFFER_QUEUE];
	odp_buffer_t buf;
	odp_event_t ev;
	odp_pool_t msg_pool;
	odp_event_t *pev_tmp;
	int i, deq_ret, ret;
	int nr_deq_entries = 0;
	int max_iteration = CONFIG_MAX_ITERATION;
	void *prtn = NULL;
	odp_queue_param_t qparams;

	odp_queue_param_init(&qparams);
	qparams.sched.prio  = ODP_SCHED_PRIO_LOWEST;
	qparams.sched.sync  = ODP_SCHED_SYNC_NONE;
	qparams.sched.group = ODP_SCHED_GROUP_WORKER;

	queue_creat_id = odp_queue_create("test_queue",
					  ODP_QUEUE_TYPE_POLL, &qparams);
	CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);
	CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
			odp_queue_type(queue_creat_id));

	queue_id = odp_queue_lookup("test_queue");
	CU_ASSERT_EQUAL(queue_creat_id, queue_id);

	CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
			odp_queue_sched_group(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST,
			odp_queue_sched_prio(queue_id));
	CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE,
			odp_queue_sched_type(queue_id));

	CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));

	prtn = odp_queue_context(queue_id);
	CU_ASSERT(&queue_contest == (int *)prtn);

	msg_pool = odp_pool_lookup("msg_pool");
	buf = odp_buffer_alloc(msg_pool);
	CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
	ev = odp_buffer_to_event(buf);

	if (!(CU_ASSERT(odp_queue_enq(queue_id, ev) == 0))) {
		odp_buffer_free(buf);
	} else {
		CU_ASSERT_EQUAL(ev, odp_queue_deq(queue_id));
		odp_buffer_free(buf);
	}

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t buf = odp_buffer_alloc(msg_pool);

		enev[i] = odp_buffer_to_event(buf);
	}

	/*
	 * odp_queue_enq_multi may return 0..n buffers due to the resource
	 * constraints in the implementation at that given point of time.
	 * But here we assume that we succeed in enqueuing all buffers.
	 */
	ret = odp_queue_enq_multi(queue_id, enev, MAX_BUFFER_QUEUE);
	CU_ASSERT(MAX_BUFFER_QUEUE == ret);
	i = ret < 0 ? 0 : ret;
	for ( ; i < MAX_BUFFER_QUEUE; i++)
		odp_event_free(enev[i]);

	pev_tmp = deev;
	do {
		deq_ret = odp_queue_deq_multi(queue_id, pev_tmp,
					      MAX_BUFFER_QUEUE);
		nr_deq_entries += deq_ret;
		max_iteration--;
		pev_tmp += deq_ret;
		CU_ASSERT(max_iteration >= 0);
	} while (nr_deq_entries < MAX_BUFFER_QUEUE);

	for (i = 0; i < MAX_BUFFER_QUEUE; i++) {
		odp_buffer_t enbuf = odp_buffer_from_event(enev[i]);

		CU_ASSERT_EQUAL(enev[i], deev[i]);
		odp_buffer_free(enbuf);
	}

	CU_ASSERT(odp_queue_destroy(queue_id) == 0);
}
void *default_event_dispatcher(void *arg)
{
	odp_event_t ev;
	odp_packet_t pkt;
	odp_queue_t in_queue;
	odp_event_t events[OFP_EVT_RX_BURST_SIZE];
	int event_idx = 0;
	int event_cnt = 0;
	ofp_pkt_processing_func pkt_func = (ofp_pkt_processing_func)arg;
	odp_bool_t *is_running = NULL;

#if ODP_VERSION < 106
	if (odp_init_local(ODP_THREAD_WORKER)) {
		OFP_ERR("odp_init_local failed");
		return NULL;
	}
#endif
	if (ofp_init_local()) {
		OFP_ERR("ofp_init_local failed");
		return NULL;
	}

	is_running = ofp_get_processing_state();
	if (is_running == NULL) {
		OFP_ERR("ofp_get_processing_state failed");
		ofp_term_local();
		return NULL;
	}

	/* PER CORE DISPATCHER */
	while (*is_running) {
		event_cnt = odp_schedule_multi(&in_queue, ODP_SCHED_WAIT,
					       events,
					       OFP_EVT_RX_BURST_SIZE);
		for (event_idx = 0; event_idx < event_cnt; event_idx++) {
			ev = events[event_idx];

			if (ev == ODP_EVENT_INVALID)
				continue;

			if (odp_event_type(ev) == ODP_EVENT_TIMEOUT) {
				ofp_timer_handle(ev);
				continue;
			}

			if (odp_event_type(ev) == ODP_EVENT_PACKET) {
				pkt = odp_packet_from_event(ev);
#if 0
				if (odp_unlikely(
					    odp_packet_has_error(pkt))) {
					OFP_DBG("Dropping packet with error");
					odp_packet_free(pkt);
					continue;
				}
#endif
				ofp_packet_input(pkt, in_queue, pkt_func);
				continue;
			}

			OFP_ERR("Unexpected event type: %u",
				odp_event_type(ev));

			/* Free events by type */
			if (odp_event_type(ev) == ODP_EVENT_BUFFER) {
				odp_buffer_free(odp_buffer_from_event(ev));
				continue;
			}

			if (odp_event_type(ev) == ODP_EVENT_CRYPTO_COMPL) {
				odp_crypto_compl_free(
					odp_crypto_compl_from_event(ev));
				continue;
			}
		}
		ofp_send_pending_pkt();
	}

	if (ofp_term_local())
		OFP_ERR("ofp_term_local failed");

	return NULL;
}
/**
 * Release per packet resources
 *
 * @param ctx Packet context
 */
static void free_pkt_ctx(pkt_ctx_t *ctx)
{
	odp_buffer_free(ctx->buffer);
}
static ngx_int_t
ngx_select_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
    odp_packet_t pkts[OFP_PKT_RX_BURST_SIZE];
    odp_packet_t odp_pkt;
    int pkt_cnt = 0;
    int pkt_idx = 0;

    pkt_cnt = odp_pktin_recv(in_queue, pkts, OFP_PKT_RX_BURST_SIZE);

    for (pkt_idx = 0; pkt_idx < pkt_cnt; pkt_idx++) {
        odp_pkt = pkts[pkt_idx];
        ofp_packet_input(odp_pkt, ODP_QUEUE_INVALID,
                         ofp_eth_vlan_processing);
    }

    if (pkt_cnt < 1)
        ofp_send_pending_pkt();

    static uint32_t count = 0;

    /* odp_queue_deq has a lock that impacts performance */
    if (count++ > 50) {
        count = 0;
        odp_queue_t timer_queue = ofp_timer_queue_cpu(-1);
        odp_event_t event = odp_queue_deq(timer_queue);

        if (event != ODP_EVENT_INVALID) {
            if (odp_event_type(event) == ODP_EVENT_TIMEOUT) {
                ofp_timer_handle(event);
            } else {
                odp_buffer_free(odp_buffer_from_event(event));
            }
        }
    }

#if OFP_NOTIFY
    return NGX_OK;
#endif

    int ready, nready;
    ngx_err_t err;
    ngx_uint_t i, found;
    ngx_event_t *ev;
    ngx_queue_t *queue;
    struct timeval tv, *tp;
    ngx_connection_t *c;

    if (max_fd == -1) {
        for (i = 0; i < nevents; i++) {
            c = event_index[i]->data;
            ngx_log_debug(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                          "change max_fd: %i, c->fd : %d", max_fd, c->fd);
            if (max_fd < c->fd) {
                max_fd = c->fd;
            }
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "change max_fd: %i", max_fd);
    }

#if (NGX_DEBUG)
    if (cycle->log->log_level & NGX_LOG_DEBUG_ALL) {
        for (i = 0; i < nevents; i++) {
            ev = event_index[i];
            c = ev->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "select event: fd:%d wr:%d", c->fd, ev->write);
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "max_fd: %i", max_fd);
    }
#endif

    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;
    } else {
        tv.tv_sec = (long) (timer / 1000);
        tv.tv_usec = (long) ((timer % 1000) * 1000);
        tp = &tv;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select timer: %M", timer);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select max_fd %d, tp %p ", max_fd, tp);
    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   " master %p : work %p",
                   (ofp_fd_set *)&master_read_fd_set,
                   (ofp_fd_set *)&work_read_fd_set);

    ready = select(max_fd + 1, &master_read_fd_set,
                   &master_write_fd_set, NULL, tp);

    err = (ready == -1) ? ngx_errno : 0;

    if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
        ngx_time_update();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select ready %d", ready);

    if (err) {
        ngx_uint_t level;

        if (err == NGX_EINTR) {

            if (ngx_event_timer_alarm) {
                ngx_event_timer_alarm = 0;
                return NGX_OK;
            }

            level = NGX_LOG_INFO;

        } else {
            level = NGX_LOG_ALERT;
        }

        ngx_log_error(level, cycle->log, err, "select() failed");

        if (err == NGX_EBADF) {
            /* ngx_select_repair_fd_sets(cycle); */
        }

        return NGX_ERROR;
    }

    if (ready == 0) {
        return NGX_OK;

        /* The early return above makes the original nginx handling of a
         * zero-ready select() below unreachable in this OFP port. */
        if (timer != NGX_TIMER_INFINITE) {
            return NGX_OK;
        }

        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "select() returned no events without timeout");
        return NGX_ERROR;
    }

    nready = 0;

    for (i = 0; i < nevents; i++) {
        ev = event_index[i];
        c = ev->data;
        found = 0;

        if (ev->write) {
            if (FD_ISSET(c->fd, &master_write_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select write %d", c->fd);
            }

        } else {
            if (FD_ISSET(c->fd, &master_read_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select read %d", c->fd);
            }
        }

        if (found) {
            ev->ready = 1;

            queue = ev->accept ? &ngx_posted_accept_events
                               : &ngx_posted_events;

            ngx_post_event(ev, queue);

            nready++;
        }
    }

    if (ready != nready) {
        /* ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
         *               "select ready != events: %d:%d", ready, nready);
         * ngx_select_repair_fd_sets(cycle); */
    }

    return NGX_OK;
}