/* timer_num defines the timer type. At the moment there is only one timer. */
int ofp_timer_ticks(int timer_num)
{
	(void)timer_num;

	if (!shm)
		return 0;

	return odp_timer_current_tick(shm->socket_timer_pool);
}
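/*
 * Hypothetical usage sketch (not part of the sources above): sample the
 * tick counter twice to estimate how many socket-timer ticks a blocking
 * operation took. do_blocking_work() is an illustrative placeholder.
 */
static int example_elapsed_ticks(void)
{
	int t0 = ofp_timer_ticks(0);	/* timer_num 0: the only timer */

	do_blocking_work();

	return ofp_timer_ticks(0) - t0;
}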
/* @private Handle a received (timeout) event */
static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
{
	CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID); /* Internal error */
	if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
		/* Not a timeout event */
		CU_FAIL("Unexpected event type received");
		return;
	}
	/* Read the metadata from the timeout */
	odp_timeout_t tmo = odp_timeout_from_event(ev);
	odp_timer_t tim = odp_timeout_timer(tmo);
	uint64_t tick = odp_timeout_tick(tmo);
	struct test_timer *ttp = odp_timeout_user_ptr(tmo);

	if (tim == ODP_TIMER_INVALID)
		CU_FAIL("odp_timeout_timer() invalid timer");
	if (!ttp)
		CU_FAIL("odp_timeout_user_ptr() null user ptr");
	if (ttp && ttp->ev2 != ev)
		CU_FAIL("odp_timeout_user_ptr() wrong user ptr");
	if (ttp && ttp->tim != tim)
		CU_FAIL("odp_timeout_timer() wrong timer");
	if (stale) {
		if (odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (fresh) for stale timeout");
		/* Stale timeout => local timer must have invalid tick */
		if (ttp && ttp->tick != TICK_INVALID)
			CU_FAIL("Stale timeout for active timer");
	} else {
		if (!odp_timeout_fresh(tmo))
			CU_FAIL("Wrong status (stale) for fresh timeout");
		/* Fresh timeout => local timer must have matching tick */
		if (ttp && ttp->tick != tick) {
			LOG_DBG("Wrong tick: expected %" PRIu64
				" actual %" PRIu64 "\n", ttp->tick, tick);
			CU_FAIL("odp_timeout_tick() wrong tick");
		}
		/* Check that timeout was delivered 'timely' */
		if (tick > odp_timer_current_tick(tp))
			CU_FAIL("Timeout delivered early");
		if (tick < prev_tick) {
			LOG_DBG("Too late tick: %" PRIu64
				" prev_tick %" PRIu64 "\n", tick, prev_tick);
			/* We don't report late timeouts using CU_FAIL */
			odp_atomic_inc_u32(&ndelivtoolate);
		}
	}

	if (ttp) {
		/* Internal error */
		CU_ASSERT_FATAL(ttp->ev == ODP_EVENT_INVALID);
		ttp->ev = ev;
	}
}
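/*
 * Minimal sketch (an assumption, not taken from the test above) of how a
 * worker could feed plain-queue events into handle_tmo(). The real
 * validation test drives handle_tmo() from its worker loop; drain_queue()
 * is an illustrative name.
 */
static void drain_queue(odp_queue_t queue, uint64_t prev_tick)
{
	odp_event_t ev;

	while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID)
		handle_tmo(ev, false /* expect fresh timeouts */, prev_tick);
}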
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us*ODP_TIME_USEC;
	period = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG(" [%i] period %"PRIu64" ticks, %"PRIu64" ns\n",
		    thr, period, period_ns);

	EXAMPLE_DBG(" [%i] current tick %"PRIu64"\n",
		    thr, odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}

	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}

	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */

		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}

		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}
		EXAMPLE_DBG(" [%i] timeout, tick %"PRIu64"\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");
	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
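/*
 * remove_prescheduled_events() is called above but not shown in this
 * listing. A minimal sketch of what such a helper can look like, assuming
 * it only needs to pause scheduling and free any events already
 * prescheduled to this thread:
 */
static void remove_prescheduled_events(void)
{
	odp_event_t ev;
	odp_queue_t queue;

	odp_schedule_pause();

	while ((ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT)) !=
			ODP_EVENT_INVALID)
		odp_event_free(ev);
}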
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;
	uint32_t num_workers = gbls->num_workers;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG(" [%i] period %" PRIu64 " ticks, %" PRIu64 " ns\n",
		    thr, period, period_ns);

	EXAMPLE_DBG(" [%i] current tick %" PRIu64 "\n",
		    thr, odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}

	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}

	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while (1) {
		int wait = 0;
		odp_event_t ev;
		odp_timer_set_t rc;

		if (ttp) {
			tick += period;
			rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
			if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
				/* Too early or too late timeout requested */
				EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
					      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);
		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
			if ((++wait > WAIT_NUM) &&
			    (odp_atomic_load_u32(&gbls->remain) < num_workers))
				EXAMPLE_ABORT("At least one TMO was lost\n");
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */

		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));

		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo))
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		EXAMPLE_DBG(" [%i] timeout, tick %" PRIu64 "\n", thr, tick);

		uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

		if (!rx_num)
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		else if (rx_num > num_workers)
			continue;

		odp_timeout_free(odp_timeout_from_event(ttp->ev));
		odp_timer_free(ttp->tim);
		ttp = NULL;
	}

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
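/*
 * timerset2str() is used by both variants above but not shown here. A
 * minimal sketch, assuming the classic odp_timer_set_t return codes
 * (ODP_TIMER_SUCCESS, ODP_TIMER_TOOEARLY, ODP_TIMER_TOOLATE,
 * ODP_TIMER_NOEVENT):
 */
static const char *timerset2str(odp_timer_set_t val)
{
	switch (val) {
	case ODP_TIMER_SUCCESS:
		return "success";
	case ODP_TIMER_TOOEARLY:
		return "too early";
	case ODP_TIMER_TOOLATE:
		return "too late";
	case ODP_TIMER_NOEVENT:
		return "no event";
	default:
		return "unknown";
	}
}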
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
			    void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long 1 s resolution timeout */
		uint64_t sec = tmo_us/1000000UL;

		if (sec > TIMER_NUM_LONG_SLOTS) {
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;
		bufdata->id = ((shm->id++)<<TIMER_LONG_SHIFT) | ix | 0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);

		return (odp_timer_t) bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

		period_ns = tmo_us*ODP_TIME_USEC_IN_NS;
		period = odp_timer_ns_to_tick(shm->socket_timer_pool, period_ns);
		tick = odp_timer_current_tick(shm->socket_timer_pool);
		tick += period;

		timer = odp_timer_alloc(shm->socket_timer_pool, shm->queue, bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);

		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}

	return ODP_TIMER_INVALID;
}
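/*
 * Hypothetical caller sketch (not part of the sources above): arm a 250 ms
 * one-shot timer. It assumes ofp_timer_callback takes a single void *
 * pointing at the copy of the argument made by ofp_timer_start();
 * my_expiry_cb and arm_port_timer are illustrative names.
 */
static void my_expiry_cb(void *arg)
{
	int port = *(int *)arg;	/* arg points at the copied argument */

	OFP_INFO("timer expired for port %d", port);
}

static odp_timer_t arm_port_timer(int port)
{
	odp_timer_t tim = ofp_timer_start(250000UL /* us */, my_expiry_cb,
					  &port, sizeof(port));

	if (tim == ODP_TIMER_INVALID)
		OFP_ERR("ofp_timer_start failed");

	return tim;
}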