void timer_test_timeout_pool_free(void)
{
	odp_pool_t pool;
	odp_timeout_t tmo;
	odp_pool_param_t params;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = 1;

	pool = odp_pool_create("timeout_pool_free", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	odp_pool_print(pool);

	/* Allocate the only timeout from the pool */
	tmo = odp_timeout_alloc(pool);
	CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);

	/* Pool should have only one timeout */
	CU_ASSERT_FATAL(odp_timeout_alloc(pool) == ODP_TIMEOUT_INVALID);

	odp_timeout_free(tmo);

	/* Check that the timeout was returned back to the pool */
	tmo = odp_timeout_alloc(pool);
	CU_ASSERT_FATAL(tmo != ODP_TIMEOUT_INVALID);

	odp_timeout_free(tmo);
	CU_ASSERT(odp_pool_destroy(pool) == 0);
}
void timer_test_timeout_pool_alloc(void)
{
	odp_pool_t pool;
	const int num = 3;
	odp_timeout_t tmo[num];
	odp_event_t ev;
	int index;
	char wrong_type = 0;
	odp_pool_param_t params;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = num;

	pool = odp_pool_create("timeout_pool_alloc", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	odp_pool_print(pool);

	/* Try to allocate num items from the pool */
	for (index = 0; index < num; index++) {
		tmo[index] = odp_timeout_alloc(pool);

		if (tmo[index] == ODP_TIMEOUT_INVALID)
			break;

		ev = odp_timeout_to_event(tmo[index]);
		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			wrong_type = 1;
	}

	/* Check that the pool had at least num items */
	CU_ASSERT(index == num);
	/* index now points past the end of tmo[] or at an invalid timeout */
	index--;

	/* Check that the pool had correct buffers */
	CU_ASSERT(wrong_type == 0);

	for (; index >= 0; index--)
		odp_timeout_free(tmo[index]);

	CU_ASSERT(odp_pool_destroy(pool) == 0);
}
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG(" [%i] period %"PRIu64" ticks, %"PRIu64" ns\n", thr,
		    period, period_ns);

	EXAMPLE_DBG(" [%i] current tick %"PRIu64"\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}

	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}

	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while ((int)odp_atomic_load_u32(&gbls->remain) > 0) {
		odp_event_t ev;
		odp_timer_set_t rc;

		tick += period;
		rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
		if (odp_unlikely(rc != ODP_TIMER_SUCCESS)) {
			/* Too early or too late timeout requested */
			EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
				      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);

		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */

		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT) {
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));
		}

		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo)) {
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		}

		EXAMPLE_DBG(" [%i] timeout, tick %"PRIu64"\n", thr, tick);

		odp_atomic_dec_u32(&gbls->remain);
	}

	/* Cancel and free last timer used */
	(void)odp_timer_cancel(ttp->tim, &ttp->ev);
	if (ttp->ev != ODP_EVENT_INVALID)
		odp_timeout_free(odp_timeout_from_event(ttp->ev));
	else
		EXAMPLE_ERR("Lost timeout event at timer cancel\n");

	/* Since we have cancelled the timer, there is no timeout event to
	 * return from odp_timer_free() */
	(void)odp_timer_free(ttp->tim);

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
void timer_test_odp_timer_cancel(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_timer_pool_param_t tparam;
	odp_timer_pool_t tp;
	odp_queue_t queue;
	odp_timer_t tim;
	odp_event_t ev;
	odp_timeout_t tmo;
	odp_timer_set_t rc;
	uint64_t tick;

	odp_pool_param_init(&params);
	params.type    = ODP_POOL_TIMEOUT;
	params.tmo.num = 1;

	pool = odp_pool_create("tmo_pool_for_cancel", &params);
	if (pool == ODP_POOL_INVALID)
		CU_FAIL_FATAL("Timeout pool create failed");

	tparam.res_ns     = 100 * ODP_TIME_MSEC_IN_NS;
	tparam.min_tmo    = 1 * ODP_TIME_SEC_IN_NS;
	tparam.max_tmo    = 10 * ODP_TIME_SEC_IN_NS;
	tparam.num_timers = 1;
	tparam.priv       = 0;
	tparam.clk_src    = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("timer_pool0", &tparam);
	if (tp == ODP_TIMER_POOL_INVALID)
		CU_FAIL_FATAL("Timer pool create failed");

	/* Start all created timer pools */
	odp_timer_pool_start();

	queue = odp_queue_create("timer_queue", NULL);
	if (queue == ODP_QUEUE_INVALID)
		CU_FAIL_FATAL("Queue create failed");

#define USER_PTR ((void *)0xdead)
	tim = odp_timer_alloc(tp, queue, USER_PTR);
	if (tim == ODP_TIMER_INVALID)
		CU_FAIL_FATAL("Failed to allocate timer");
	LOG_DBG("Timer handle: %" PRIu64 "\n", odp_timer_to_u64(tim));

	ev = odp_timeout_to_event(odp_timeout_alloc(pool));
	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Failed to allocate timeout");

	tick = odp_timer_ns_to_tick(tp, 2 * ODP_TIME_SEC_IN_NS);

	rc = odp_timer_set_rel(tim, tick, &ev);
	if (rc != ODP_TIMER_SUCCESS)
		CU_FAIL_FATAL("Failed to set timer (relative time)");

	ev = ODP_EVENT_INVALID;

	if (odp_timer_cancel(tim, &ev) != 0)
		CU_FAIL_FATAL("Failed to cancel timer (relative time)");

	if (ev == ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Cancel did not return event");

	tmo = odp_timeout_from_event(ev);
	if (tmo == ODP_TIMEOUT_INVALID)
		CU_FAIL_FATAL("Cancel did not return timeout");
	LOG_DBG("Timeout handle: %" PRIu64 "\n", odp_timeout_to_u64(tmo));

	if (odp_timeout_timer(tmo) != tim)
		CU_FAIL("Cancel invalid tmo.timer");

	if (odp_timeout_user_ptr(tmo) != USER_PTR)
		CU_FAIL("Cancel invalid tmo.user_ptr");

	odp_timeout_free(tmo);

	ev = odp_timer_free(tim);
	if (ev != ODP_EVENT_INVALID)
		CU_FAIL_FATAL("Free returned event");

	odp_timer_pool_destroy(tp);

	if (odp_queue_destroy(queue) != 0)
		CU_FAIL_FATAL("Failed to destroy queue");

	if (odp_pool_destroy(pool) != 0)
		CU_FAIL_FATAL("Failed to destroy pool");
}
/**
 * ODP packet example main function
 */
int main(int argc, char *argv[])
{
	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
	odp_pool_t pool;
	int num_workers;
	int i;
	odp_shm_t shm;
	odp_cpumask_t cpumask;
	char cpumaskstr[ODP_CPUMASK_STR_SIZE];
	odp_pool_param_t params;
	odp_timer_pool_param_t tparams;
	odp_timer_pool_t tp;
	odp_pool_t tmop;

	/* Init ODP before calling anything else */
	if (odp_init_global(NULL, NULL)) {
		EXAMPLE_ERR("Error: ODP global init failed.\n");
		exit(EXIT_FAILURE);
	}

	if (odp_init_local(ODP_THREAD_CONTROL)) {
		EXAMPLE_ERR("Error: ODP local init failed.\n");
		exit(EXIT_FAILURE);
	}

	my_sleep(1 + __k1_get_cluster_id() / 4);

	/* init counters */
	odp_atomic_init_u64(&counters.seq, 0);
	odp_atomic_init_u64(&counters.ip, 0);
	odp_atomic_init_u64(&counters.udp, 0);
	odp_atomic_init_u64(&counters.icmp, 0);
	odp_atomic_init_u64(&counters.cnt, 0);

	/* Reserve memory for args from shared mem */
	shm = odp_shm_reserve("shm_args", sizeof(args_t),
			      ODP_CACHE_LINE_SIZE, 0);
	args = odp_shm_addr(shm);

	if (args == NULL) {
		EXAMPLE_ERR("Error: shared mem alloc failed.\n");
		exit(EXIT_FAILURE);
	}
	memset(args, 0, sizeof(*args));

	/* Parse and store the application arguments */
	parse_args(argc, argv, &args->appl);

	/* Print both system and application information */
	print_info(NO_PATH(argv[0]), &args->appl);

	/* Default to system CPU count unless user specified */
	num_workers = MAX_WORKERS;
	if (args->appl.cpu_count)
		num_workers = args->appl.cpu_count;

	num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
	if (args->appl.mask) {
		odp_cpumask_from_str(&cpumask, args->appl.mask);
		num_workers = odp_cpumask_count(&cpumask);
	}

	(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));

	printf("num worker threads: %i\n", num_workers);
	printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
	printf("cpu mask: %s\n", cpumaskstr);

	/* ping mode needs two workers */
	if (args->appl.mode == APPL_MODE_PING) {
		if (num_workers < 2) {
			EXAMPLE_ERR("Need at least two worker threads\n");
			exit(EXIT_FAILURE);
		} else {
			num_workers = 2;
		}
	}

	/* Create packet pool */
	odp_pool_param_init(&params);
	params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.len     = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.num     = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
	params.type        = ODP_POOL_PACKET;

	pool = odp_pool_create("packet_pool", &params);

	if (pool == ODP_POOL_INVALID) {
		EXAMPLE_ERR("Error: packet pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	odp_pool_print(pool);

	/* Create timer pool */
	tparams.res_ns     = 1 * ODP_TIME_MSEC_IN_NS;
	tparams.min_tmo    = 0;
	tparams.max_tmo    = 10000 * ODP_TIME_SEC_IN_NS;
	tparams.num_timers = num_workers; /* One timer per worker */
	tparams.priv       = 0; /* Shared */
	tparams.clk_src    = ODP_CLOCK_CPU;
	tp = odp_timer_pool_create("timer_pool", &tparams);
	if (tp == ODP_TIMER_POOL_INVALID) {
		EXAMPLE_ERR("Timer pool create failed.\n");
		exit(EXIT_FAILURE);
	}
	odp_timer_pool_start();

	/* Create timeout pool */
	memset(&params, 0, sizeof(params));
	params.tmo.num = tparams.num_timers; /* One timeout per timer */
	params.type    = ODP_POOL_TIMEOUT;

	tmop = odp_pool_create("timeout_pool", &params);
	if (tmop == ODP_POOL_INVALID) {
		EXAMPLE_ERR("Error: timeout pool create failed.\n");
		exit(EXIT_FAILURE);
	}

	for (i = 0; i < args->appl.if_count; ++i)
		create_pktio(args->appl.if_names[i], pool);

	/* Create and init worker threads */
	memset(thread_tbl, 0, sizeof(thread_tbl));

	if (args->appl.mode == APPL_MODE_PING) {
		odp_cpumask_t cpu_mask;
		odp_queue_t tq;
		int cpu_first, cpu_next;

		odp_cpumask_zero(&cpu_mask);
		cpu_first = odp_cpumask_first(&cpumask);
		odp_cpumask_set(&cpu_mask, cpu_first);

		tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
		if (tq == ODP_QUEUE_INVALID)
			abort();
		args->thread[1].pktio_dev = args->appl.if_names[0];
		args->thread[1].pool = pool;
		args->thread[1].tp = tp;
		args->thread[1].tq = tq;
		args->thread[1].tim = odp_timer_alloc(tp, tq, NULL);
		if (args->thread[1].tim == ODP_TIMER_INVALID)
			abort();
		args->thread[1].tmo_ev = odp_timeout_alloc(tmop);
		if (args->thread[1].tmo_ev == ODP_TIMEOUT_INVALID)
			abort();
		args->thread[1].mode = args->appl.mode;
		odph_linux_pthread_create(&thread_tbl[1], &cpu_mask,
					  gen_recv_thread, &args->thread[1],
					  ODP_THREAD_WORKER);

		tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
		if (tq == ODP_QUEUE_INVALID)
			abort();
		args->thread[0].pktio_dev = args->appl.if_names[0];
		args->thread[0].pool = pool;
		args->thread[0].tp = tp;
		args->thread[0].tq = tq;
		args->thread[0].tim = odp_timer_alloc(tp, tq, NULL);
		if (args->thread[0].tim == ODP_TIMER_INVALID)
			abort();
		args->thread[0].tmo_ev = odp_timeout_alloc(tmop);
		if (args->thread[0].tmo_ev == ODP_TIMEOUT_INVALID)
			abort();
		args->thread[0].mode = args->appl.mode;
		cpu_next = odp_cpumask_next(&cpumask, cpu_first);
		odp_cpumask_zero(&cpu_mask);
		odp_cpumask_set(&cpu_mask, cpu_next);
		odph_linux_pthread_create(&thread_tbl[0], &cpu_mask,
					  gen_send_thread, &args->thread[0],
					  ODP_THREAD_WORKER);
	} else {
		int cpu = odp_cpumask_first(&cpumask);

		for (i = 0; i < num_workers; ++i) {
			odp_cpumask_t thd_mask;
			void *(*thr_run_func) (void *);
			int if_idx;
			odp_queue_t tq;

			if_idx = i % args->appl.if_count;

			args->thread[i].pktio_dev = args->appl.if_names[if_idx];
			tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
			if (tq == ODP_QUEUE_INVALID)
				abort();
			args->thread[i].pool = pool;
			args->thread[i].tp = tp;
			args->thread[i].tq = tq;
			args->thread[i].tim = odp_timer_alloc(tp, tq, NULL);
			if (args->thread[i].tim == ODP_TIMER_INVALID)
				abort();
			args->thread[i].tmo_ev = odp_timeout_alloc(tmop);
			if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID)
				abort();
			args->thread[i].mode = args->appl.mode;

			if (args->appl.mode == APPL_MODE_UDP) {
				thr_run_func = gen_send_thread;
			} else if (args->appl.mode == APPL_MODE_RCV) {
				thr_run_func = gen_recv_thread;
			} else {
				EXAMPLE_ERR("ERR MODE\n");
				exit(EXIT_FAILURE);
			}
			/*
			 * Create threads one-by-one instead of all-at-once,
			 * because each thread might get different arguments.
			 * Calls odp_thread_create(cpu) for each thread
			 */
			odp_cpumask_zero(&thd_mask);
			odp_cpumask_set(&thd_mask, cpu);
			odph_linux_pthread_create(&thread_tbl[i], &thd_mask,
						  thr_run_func,
						  &args->thread[i],
						  ODP_THREAD_WORKER);
			cpu = odp_cpumask_next(&cpumask, cpu);
		}
	}

	print_global_stats(num_workers);

	/* Master thread waits for other threads to exit */
	odph_linux_pthread_join(thread_tbl, num_workers);

	free(args->appl.if_names);
	free(args->appl.if_str);
	printf("Exit\n\n");

	return 0;
}
/** @private test timeout */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
	uint64_t period;
	uint64_t period_ns;
	odp_queue_t queue;
	uint64_t tick;
	struct test_timer *ttp;
	odp_timeout_t tmo;
	uint32_t num_workers = gbls->num_workers;

	EXAMPLE_DBG(" [%i] test_timeouts\n", thr);

	queue = odp_queue_lookup("timer_queue");

	period_ns = gbls->args.period_us * ODP_TIME_USEC;
	period    = odp_timer_ns_to_tick(gbls->tp, period_ns);

	EXAMPLE_DBG(" [%i] period %" PRIu64 " ticks, %" PRIu64 " ns\n", thr,
		    period, period_ns);

	EXAMPLE_DBG(" [%i] current tick %" PRIu64 "\n", thr,
		    odp_timer_current_tick(gbls->tp));

	ttp = &gbls->tt[thr];
	ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
	if (ttp->tim == ODP_TIMER_INVALID) {
		EXAMPLE_ERR("Failed to allocate timer\n");
		return;
	}

	tmo = odp_timeout_alloc(gbls->pool);
	if (tmo == ODP_TIMEOUT_INVALID) {
		EXAMPLE_ERR("Failed to allocate timeout\n");
		return;
	}

	ttp->ev = odp_timeout_to_event(tmo);
	tick = odp_timer_current_tick(gbls->tp);

	while (1) {
		int wait = 0;
		odp_event_t ev;
		odp_timer_set_t rc;

		if (ttp) {
			tick += period;
			rc = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
			if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
				/* Too early or too late timeout requested */
				EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
					      timerset2str(rc));
		}

		/* Get the next expired timeout.
		 * We invoke the scheduler in a loop with a timeout because
		 * we are not guaranteed to receive any more timeouts. The
		 * scheduler isn't guaranteeing fairness when scheduling
		 * buffers to threads.
		 * Use 1.5 second timeout for scheduler */
		uint64_t sched_tmo = odp_schedule_wait_time(1500000000ULL);

		do {
			ev = odp_schedule(&queue, sched_tmo);
			/* Check if odp_schedule() timed out, possibly there
			 * are no remaining timeouts to receive */
			if ((++wait > WAIT_NUM) &&
			    (odp_atomic_load_u32(&gbls->remain) < num_workers))
				EXAMPLE_ABORT("At least one TMO was lost\n");
		} while (ev == ODP_EVENT_INVALID &&
			 (int)odp_atomic_load_u32(&gbls->remain) > 0);

		if (ev == ODP_EVENT_INVALID)
			break; /* No more timeouts */

		if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
			/* Not a default timeout event */
			EXAMPLE_ABORT("Unexpected event type (%u) received\n",
				      odp_event_type(ev));

		odp_timeout_t tmo = odp_timeout_from_event(ev);

		tick = odp_timeout_tick(tmo);
		ttp = odp_timeout_user_ptr(tmo);
		ttp->ev = ev;
		if (!odp_timeout_fresh(tmo))
			/* Not the expected expiration tick, timer has
			 * been reset or cancelled or freed */
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);

		EXAMPLE_DBG(" [%i] timeout, tick %" PRIu64 "\n", thr, tick);

		uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

		if (!rx_num)
			EXAMPLE_ABORT("Unexpected timeout received (timer %" PRIx32 ", tick %" PRIu64 ")\n",
				      ttp->tim, tick);
		else if (rx_num > num_workers)
			continue;

		odp_timeout_free(odp_timeout_from_event(ttp->ev));
		odp_timer_free(ttp->tim);
		ttp = NULL;
	}

	/* Remove any prescheduled events */
	remove_prescheduled_events();
}
odp_timer_t ofp_timer_start(uint64_t tmo_us, ofp_timer_callback callback,
			    void *arg, int arglen)
{
	uint64_t tick;
	uint64_t period;
	uint64_t period_ns;
	struct ofp_timer_internal *bufdata;
	odp_buffer_t buf;
	odp_timer_set_t t;
	odp_timeout_t tmo;

	/* Init shm if not done yet. */
	if ((shm == NULL) && ofp_timer_lookup_shared_memory()) {
		OFP_ERR("ofp_timer_lookup_shared_memory failed");
		return ODP_TIMER_INVALID;
	}

	/* Alloc user buffer */
	buf = odp_buffer_alloc(shm->buf_pool);
	if (buf == ODP_BUFFER_INVALID) {
		OFP_ERR("odp_buffer_alloc failed");
		return ODP_TIMER_INVALID;
	}

	bufdata = (struct ofp_timer_internal *)odp_buffer_addr(buf);
	bufdata->callback = callback;
	bufdata->buf = buf;
	bufdata->t_ev = ODP_EVENT_INVALID;
	bufdata->next = NULL;
	bufdata->id = 0;
	if (arg && arglen)
		memcpy(bufdata->arg, arg, arglen);

	if (tmo_us >= OFP_TIMER_MAX_US) {
		/* Long 1 s resolution timeout */
		uint64_t sec = tmo_us / 1000000UL;

		if (sec > TIMER_NUM_LONG_SLOTS) {
			OFP_ERR("Timeout too long = %"PRIu64"s", sec);
		}

		odp_spinlock_lock(&shm->lock);
		int ix = (shm->sec_counter + sec) & TIMER_LONG_MASK;

		bufdata->id = ((shm->id++) << TIMER_LONG_SHIFT) | ix | 0x80000000;
		bufdata->next = shm->long_table[ix];
		shm->long_table[ix] = bufdata;
		odp_spinlock_unlock(&shm->lock);
		return (odp_timer_t)bufdata->id;
	} else {
		/* Short 10 ms resolution timeout */
		odp_timer_t timer;

		/* Alloc timeout event */
		tmo = odp_timeout_alloc(shm->pool);
		if (tmo == ODP_TIMEOUT_INVALID) {
			odp_buffer_free(buf);
			OFP_ERR("odp_timeout_alloc failed");
			return ODP_TIMER_INVALID;
		}
		bufdata->t_ev = odp_timeout_to_event(tmo);

		period_ns = tmo_us * ODP_TIME_USEC_IN_NS;
		period = odp_timer_ns_to_tick(shm->socket_timer_pool, period_ns);
		tick = odp_timer_current_tick(shm->socket_timer_pool);
		tick += period;

		timer = odp_timer_alloc(shm->socket_timer_pool, shm->queue,
					bufdata);
		if (timer == ODP_TIMER_INVALID) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_alloc failed");
			return ODP_TIMER_INVALID;
		}

		t = odp_timer_set_abs(timer, tick, &bufdata->t_ev);
		if (t != ODP_TIMER_SUCCESS) {
			odp_timeout_free(tmo);
			odp_buffer_free(buf);
			OFP_ERR("odp_timer_set_abs failed");
			return ODP_TIMER_INVALID;
		}

		return timer;
	}

	return ODP_TIMER_INVALID;
}
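A minimal usage sketch for the API above, assuming the OFP convention that ofp_timer_callback takes a single void * argument (which ofp_timer_start() copies into its internal buffer); the callback and wrapper names here are hypothetical, not part of the source.

/* Hypothetical one-shot timer user: arm a 100 ms timeout whose callback
 * receives a copy of a small argument blob. */
static void example_expiry_cb(void *arg) /* hypothetical callback name */
{
	int *session_id = arg;

	OFP_INFO("timer expired for session %d", *session_id);
}

static odp_timer_t example_arm_timer(int session_id) /* hypothetical wrapper */
{
	/* 100000 us = 100 ms; the argument is copied by ofp_timer_start(),
	 * so a stack variable is fine here. */
	odp_timer_t tim = ofp_timer_start(100000, example_expiry_cb,
					  &session_id, sizeof(session_id));

	if (tim == ODP_TIMER_INVALID)
		OFP_ERR("ofp_timer_start failed");

	return tim;
}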