/**
 * Sleep for the specified amount of milliseconds
 * Use ODP timer, busy wait until timer expired and timeout event received
 */
static void millisleep(uint32_t ms,
		       odp_timer_pool_t tp,
		       odp_timer_t tim,
		       odp_queue_t q,
		       odp_timeout_t tmo)
{
	uint64_t ticks = odp_timer_ns_to_tick(tp, 1000000ULL * ms);
	odp_event_t ev = odp_timeout_to_event(tmo);
	int rc = odp_timer_set_rel(tim, ticks, &ev);

	if (rc != ODP_TIMER_SUCCESS)
		EXAMPLE_ABORT("odp_timer_set_rel() failed\n");

	/* Spin waiting for timeout event */
	while ((ev = odp_queue_deq(q)) == ODP_EVENT_INVALID)
		(void)0;
}
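For context, a minimal sketch of the setup millisleep() assumes: a started timer pool, a timer, a pollable queue and a preallocated timeout event. The names (sleep_example, sleep_tmo_pool, ...), pool sizes and resolution below are illustrative only, and error checks are omitted for brevity.

/* Illustrative setup for millisleep(); not from the original example.
 * Error handling omitted. */
static void sleep_example(void)
{
	odp_pool_param_t pparams;
	odp_timer_pool_param_t tparams;
	odp_pool_t tmo_pool;
	odp_timer_pool_t tp;
	odp_queue_t q;
	odp_timer_t tim;
	odp_timeout_t tmo;

	/* Timeout pool with a single event for the sleep */
	odp_pool_param_init(&pparams);
	pparams.type    = ODP_POOL_TIMEOUT;
	pparams.tmo.num = 1;
	tmo_pool = odp_pool_create("sleep_tmo_pool", &pparams);

	/* Timer pool: 1 ms resolution, up to 10 s timeouts (example values) */
	tparams.res_ns     = ODP_TIME_MSEC_IN_NS;
	tparams.min_tmo    = ODP_TIME_MSEC_IN_NS;
	tparams.max_tmo    = 10 * ODP_TIME_SEC_IN_NS;
	tparams.num_timers = 1;
	tparams.priv       = 0;
	tparams.clk_src    = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("sleep_timer_pool", &tparams);
	odp_timer_pool_start();

	/* Queue created with default parameters; millisleep() polls it
	 * directly with odp_queue_deq(), so it must not be a scheduled queue */
	q   = odp_queue_create("sleep_queue", NULL);
	tim = odp_timer_alloc(tp, q, NULL);
	tmo = odp_timeout_alloc(tmo_pool);

	millisleep(100, tp, tim, q, tmo); /* sleep ~100 ms */
}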
void timer_test_timeout_pool_alloc(void)
{
	odp_pool_t pool;
	const int num = 3;
	odp_timeout_t tmo[num];
	odp_event_t ev;
	int index;
	char wrong_type = 0;
	odp_pool_param_t params;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = num;

	pool = odp_pool_create("timeout_pool_alloc", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);

	odp_pool_print(pool);

	/* Try to allocate num items from the pool */
	for (index = 0; index < num; index++) {
		tmo[index] = odp_timeout_alloc(pool);

		if (tmo[index] == ODP_TIMEOUT_INVALID)
			break;

		ev = odp_timeout_to_event(tmo[index]);
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			wrong_type = 1;
	}

	/* Check that the pool had at least num items */
	CU_ASSERT(index == num);
	/* index now points past the end of tmo[] or at an invalid timeout */
	index--;

	/* Check that the pool returned only timeout events */
	CU_ASSERT(wrong_type == 0);

	for (; index >= 0; index--)
		odp_timeout_free(tmo[index]);

	CU_ASSERT(odp_pool_destroy(pool) == 0);
}
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);
	EXAMPLE_DBG(" [%i] period %"PRIu64" ticks, %"PRIu64" ns\n", thr,
		    period, period_ns);
	EXAMPLE_DBG(" [%i] current tick %"PRIu64"\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}
		odp_timeout_t tmo = odp_timeout_from_event(ev);
		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}
		EXAMPLE_DBG(" [%i] timeout, tick %"PRIu64"\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");
	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
void timer_test_odp_timer_cancel(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_timer_pool_param_t tparam;
	odp_timer_pool_t tp;
	odp_queue_t queue;
	odp_timer_t tim;
	odp_event_t ev;
	odp_timeout_t tmo;
	odp_timer_set_t rc;
	uint64_t tick;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = 1;

	pool = odp_pool_create("tmo_pool_for_cancel", &params);
	if (pool == ODP_POOL_INVALID)
		CU_FAIL_FATAL("Timeout pool create failed");

	tparam.res_ns     = 100 * ODP_TIME_MSEC_IN_NS;
	tparam.min_tmo    = 1 * ODP_TIME_SEC_IN_NS;
	tparam.max_tmo    = 10 * ODP_TIME_SEC_IN_NS;
	tparam.num_timers = 1;
	tparam.priv       = 0;
	tparam.clk_src    = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("timer_pool0", &tparam);
	if (tp == ODP_TIMER_POOL_INVALID)
		CU_FAIL_FATAL("Timer pool create failed");

	/* Start all created timer pools */
	odp_timer_pool_start();

	queue = odp_queue_create("timer_queue", NULL);
	if (queue == ODP_QUEUE_INVALID)
		CU_FAIL_FATAL("Queue create failed");

#define USER_PTR ((void *)0xdead)
	tim = odp_timer_alloc(tp, queue, USER_PTR);
	if (tim == ODP_TIMER_INVALID)
		CU_FAIL_FATAL("Failed to allocate timer");
	LOG_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));

	ev = odp_timeout_to_event(odp_timeout_alloc(pool));
	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Failed to allocate timeout");

	tick = odp_timer_ns_to_tick(tp, 2 * ODP_TIME_SEC_IN_NS);

	rc = odp_timer_set_rel(tim, tick, &ev);
	if (rc != ODP_TIMER_SUCCESS)
		CU_FAIL_FATAL("Failed to set timer (relative time)");

	ev = ODP_EVENT_INVALID;
	if (odp_timer_cancel(tim, &ev) != 0)
		CU_FAIL_FATAL("Failed to cancel timer (relative time)");

	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Cancel did not return event");

	tmo = odp_timeout_from_event(ev);
	if (tmo == ODP_TIMEOUT_INVALID)
		CU_FAIL_FATAL("Cancel did not return timeout");
	LOG_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));

	if (odp_timeout_timer(tmo) != tim)
		CU_FAIL("Cancel invalid tmo.timer");

	if (odp_timeout_user_ptr(tmo) != USER_PTR)
		CU_FAIL("Cancel invalid tmo.user_ptr");

	odp_timeout_free(tmo);
	ev = odp_timer_free(tim);
	if (ev != ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Free returned event");

	odp_timer_pool_destroy(tp);

	if (odp_queue_destroy(queue) != 0)
		CU_FAIL_FATAL("Failed to destroy queue");

	if (odp_pool_destroy(pool) != 0)
		CU_FAIL_FATAL("Failed to destroy pool");
}
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;
	uint32_t num_workers = gbls->num_workers;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);
	EXAMPLE_DBG(" [%i] period %"PRIu64" ticks, %"PRIu64" ns\n", thr,
		    period, period_ns);
	EXAMPLE_DBG(" [%i] current tick %"PRIu64"\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}
	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}
	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while (1) {
		int wait = 0;
		odp_event_t ev;
		odp_timer_set_t rc;

		if (ttp) {
			tick += period;
			rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
			if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
				/* Too early or too late timeout requested */
				EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
					      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
			if ((++wait > WAIT_NUM) &&
			    (odp_atomic_load_u32(&gbls->remain) < num_workers))
				EXAMPLE_ABORT("At least one TMO was lost\n");
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		odp_timeout_t tmo = odp_timeout_from_event(ev);
		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo))
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		EXAMPLE_DBG(" [%i] timeout, tick %"PRIu64"\n", thr, tick);

		uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

		if (!rx_num)
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		else if (rx_num > num_workers)
			continue;

		odp_timeout_free(odp_timeout_from_event(ttp->ev));
		odp_timer_free(ttp->tim);
		ttp = NULL;
	}

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
			    void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long 1 s resolution timeout */
		uint64_t sec = tmo_us / 1000000UL;

		if (sec > TIMER_NUM_LONG_SLOTS) {
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;
		bufdata->id = ((shm->id++) << TIMER_LONG_SHIFT) | ix | 0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);

		return (odp_timer_t)bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

		period_ns = tmo_us * ODP_TIME_USEC_IN_NS;
		period = odp_timer_ns_to_tick(shm->socket_timer_pool, period_ns);
		tick = odp_timer_current_tick(shm->socket_timer_pool);
		tick += period;

		timer = odp_timer_alloc(shm->socket_timer_pool, shm->queue, bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);
		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}

	return ODP_TIMER_INVALID;
}
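As a usage illustration (not from the OFP sources), the sketch below arms a one-shot 50 ms timer. It assumes the usual OFP callback signature void (*)(void *arg); the names my_ctx, my_timeout_cb and arm_example_timer are hypothetical.

/* Hypothetical caller of ofp_timer_start(); illustrative only. */
struct my_ctx {
	int session_id;
};

static void my_timeout_cb(void *arg)
{
	struct my_ctx *ctx = (struct my_ctx *)arg;

	OFP_INFO("timer fired for session %d", ctx->session_id);
}

static void arm_example_timer(void)
{
	struct my_ctx ctx = { .session_id = 42 };
	odp_timer_t t;

	/* The argument is copied into the timer's own buffer (see the
	 * memcpy above), so a stack-allocated ctx is fine here. */
	t = ofp_timer_start(50000 /* us */, my_timeout_cb, &ctx, sizeof(ctx));
	if (t == ODP_TIMER_INVALID)
		OFP_ERR("ofp_timer_start failed");
}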
static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
{
	odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));

	return timeout_hdr_from_buf(buf);
}
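A minimal sketch of why this helper exists, assuming the timeout header carries the expiration tick, user pointer and owning timer in fields named expiration, user_ptr and timer (hypothetical names, not necessarily the actual implementation): the public metadata accessors then reduce to thin wrappers around timeout_hdr().

/* Hypothetical accessor implementations built on timeout_hdr(). */
uint64_t odp_timeout_tick(odp_timeout_t tmo)
{
	return timeout_hdr(tmo)->expiration;	/* hypothetical field name */
}

void *odp_timeout_user_ptr(odp_timeout_t tmo)
{
	return timeout_hdr(tmo)->user_ptr;	/* hypothetical field name */
}

odp_timer_t odp_timeout_timer(odp_timeout_t tmo)
{
	return timeout_hdr(tmo)->timer;		/* hypothetical field name */
}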