Example #1
/** @private test absolute timeouts */
static void test_abs_timeouts(int thr, test_globals_t *gbls)
{
    uint64_t    period;
    uint64_t    period_ns;
    odp_queue_t queue;
    uint64_t    tick;
    struct test_timer *ttp;
    odp_timeout_t tmo;
    uint32_t num_workers = gbls->num_workers;

    EXAMPLE_DBG("  [%i] test_timeouts\n", thr);

    queue      = odp_queue_lookup("timer_queue");

    period_ns  = gbls->args.period_us * ODP_TIME_USEC;
    period     = odp_timer_ns_to_tick(gbls->tp, period_ns);

    EXAMPLE_DBG("  [%i] period %d ticks,  %d ns\n", thr,
                period, period_ns);

    EXAMPLE_DBG("  [%i] current tick %d\n", thr,
                odp_timer_current_tick(gbls->tp));

    ttp = &gbls->tt[thr];
    ttp->tim = odp_timer_alloc(gbls->tp, queue, ttp);
    if (ttp->tim == ODP_TIMER_INVALID) {
        EXAMPLE_ERR("Failed to allocate timer\n");
        return;
    }

    tmo = odp_timeout_alloc(gbls->pool);
    if (tmo == ODP_TIMEOUT_INVALID) {
        EXAMPLE_ERR("Failed to allocate timeout\n");
        return;
    }

    ttp->ev    = odp_timeout_to_event(tmo);
    tick       = odp_timer_current_tick(gbls->tp);

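    /*
     * Main loop: re-arm the timer one period past the last expiration
     * tick, then wait for the timeout event to come back through the
     * scheduler. Once the remaining-timeout count drops to the number of
     * workers, this thread frees its timeout and timer and sets ttp to
     * NULL so no further timer is armed.
     */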
    while (1) {
        int wait = 0;
        odp_event_t ev;
        odp_timer_set_t rc;

        if (ttp) {
            tick  += period;
            rc     = odp_timer_set_abs(ttp->tim, tick, &ttp->ev);
            if (odp_unlikely(rc != ODP_TIMER_SUCCESS))
                /* Too early or too late timeout requested */
                EXAMPLE_ABORT("odp_timer_set_abs() failed: %s\n",
                              timerset2str(rc));
        }

        /* Get the next expired timeout.
         * Invoke the scheduler in a loop with a timeout because we are
         * not guaranteed to receive any more timeouts; the scheduler
         * does not guarantee fairness when dispatching buffers to
         * threads. Use a 1.5 second scheduler timeout. */
        uint64_t sched_tmo =
            odp_schedule_wait_time(1500000000ULL);
        do {
            ev = odp_schedule(&queue, sched_tmo);

            /* Check if odp_schedule() timed out, possibly there
             * are no remaining timeouts to receive */
            if ((++wait > WAIT_NUM)
                    && (odp_atomic_load_u32(&gbls->remain) < num_workers))
                EXAMPLE_ABORT("At least one TMO was lost\n");
        } while (ev == ODP_EVENT_INVALID
                 && (int)odp_atomic_load_u32(&gbls->remain) > 0);

        if (ev == ODP_EVENT_INVALID)
            break;  /* No more timeouts */

        if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
            /* Not a default timeout event */
            EXAMPLE_ABORT("Unexpected event type (%u) received\n",
                          odp_event_type(ev));

        odp_timeout_t tmo = odp_timeout_from_event(ev);
        tick       = odp_timeout_tick(tmo);
        ttp        = odp_timeout_user_ptr(tmo);
        ttp->ev    = ev;
        if (!odp_timeout_fresh(tmo))
            /* Not the expected expiration tick, timer has
             * been reset or cancelled or freed */
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);

        EXAMPLE_DBG("  [%i] timeout, tick %d\n", thr, tick);

        uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);

        if (!rx_num)
            EXAMPLE_ABORT("Unexpected timeout received (timer %x, tick %d)\n",
                          ttp->tim, tick);
        else if (rx_num > num_workers)
            continue;

        odp_timeout_free(odp_timeout_from_event(ttp->ev));
        odp_timer_free(ttp->tim);
        ttp = NULL;
    }

    /* Remove any prescheduled events */
    remove_prescheduled_events();
}
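
The helpers referenced above, timerset2str() and remove_prescheduled_events(), are not part of this excerpt. A minimal sketch of what they might look like, assuming the standard odp_timer_set_t return codes and that prescheduled events are drained with ODP_SCHED_NO_WAIT after pausing scheduling (illustrative only, not necessarily the example's actual definitions):

static const char *timerset2str(odp_timer_set_t val)
{
    /* Map the odp_timer_set_abs() return code to a printable string */
    switch (val) {
    case ODP_TIMER_SUCCESS:
        return "success";
    case ODP_TIMER_TOOEARLY:
        return "too early";
    case ODP_TIMER_TOOLATE:
        return "too late";
    case ODP_TIMER_NOEVENT:
        return "no event";
    default:
        return "unknown";
    }
}

static void remove_prescheduled_events(void)
{
    odp_event_t ev;
    odp_queue_t queue;

    /* Stop the scheduler from prescheduling more events to this thread,
     * then drain and free whatever has already been prescheduled */
    odp_schedule_pause();

    while ((ev = odp_schedule(&queue, ODP_SCHED_NO_WAIT)) !=
           ODP_EVENT_INVALID)
        odp_event_free(ev);
}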
Example #2
void scheduler_test_chaos(void)
{
	odp_pool_t pool;
	odp_pool_param_t params;
	odp_queue_param_t qp;
	odp_buffer_t buf;
	chaos_buf *cbuf;
	odp_event_t ev;
	test_globals_t *globals;
	thread_args_t *args;
	odp_shm_t shm;
	odp_queue_t from;
	int i, rc;
	uint64_t wait;
	odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
				      ODP_SCHED_SYNC_ATOMIC/* , */
				      /* ODP_SCHED_SYNC_ORDERED */};
	const int num_sync = (sizeof(sync) / sizeof(sync[0]));
	const char *const qtypes[] = {"parallel", "atomic", "ordered"};

	/* Set up the scheduling environment */
	shm = odp_shm_lookup(GLOBALS_SHM_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	globals = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(globals);

	shm = odp_shm_lookup(SHM_THR_ARGS_NAME);
	CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
	args = odp_shm_addr(shm);
	CU_ASSERT_PTR_NOT_NULL_FATAL(args);

	args->globals = globals;
	args->cu_thr.numthrds = globals->num_workers;

	odp_queue_param_init(&qp);
	odp_pool_param_init(&params);
	params.buf.size = sizeof(chaos_buf);
	params.buf.align = 0;
	params.buf.num = CHAOS_NUM_EVENTS;
	params.type = ODP_POOL_BUFFER;

	pool = odp_pool_create("sched_chaos_pool", &params);
	CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
	qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;

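	/* Create the scheduled queues, cycling through the enabled sync
	 * types, and store each queue's index in its queue context */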
	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		qp.sched.sync = sync[i % num_sync];
		snprintf(globals->chaos_q[i].name,
			 sizeof(globals->chaos_q[i].name),
			 "chaos queue %d - %s", i,
			 qtypes[i % num_sync]);
		globals->chaos_q[i].handle =
			odp_queue_create(globals->chaos_q[i].name,
					 ODP_QUEUE_TYPE_SCHED,
					 &qp);
		CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
				ODP_QUEUE_INVALID);
		rc = odp_queue_context_set(globals->chaos_q[i].handle,
					   CHAOS_NDX_TO_PTR(i));
		CU_ASSERT_FATAL(rc == 0);
	}

	/* Now populate the queues with the initial seed elements */
	odp_atomic_init_u32(&globals->chaos_pending_event_count, 0);

	for (i = 0; i < CHAOS_NUM_EVENTS; i++) {
		buf = odp_buffer_alloc(pool);
		CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
		cbuf = odp_buffer_addr(buf);
		cbuf->evno = i;
		cbuf->seqno = 0;
		rc = odp_queue_enq(
			globals->chaos_q[i % CHAOS_NUM_QUEUES].handle,
			odp_buffer_to_event(buf));
		CU_ASSERT_FATAL(rc == 0);
		odp_atomic_inc_u32(&globals->chaos_pending_event_count);
	}

	/* Run the test */
	odp_cunit_thread_create(chaos_thread, &args->cu_thr);
	odp_cunit_thread_exit(&args->cu_thr);

	if (CHAOS_DEBUG)
		printf("Thread %d returning from chaos threads..cleaning up\n",
		       odp_thread_id());

	/* Cleanup: Drain queues, free events */
	wait = odp_schedule_wait_time(CHAOS_WAIT_FAIL);
	while (odp_atomic_fetch_dec_u32(
		       &globals->chaos_pending_event_count) > 0) {
		ev = odp_schedule(&from, wait);
		CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
		cbuf = odp_buffer_addr(odp_buffer_from_event(ev));
		if (CHAOS_DEBUG)
			printf("Draining event %" PRIu64
			       " seq %" PRIu64 " from Q %s...\n",
			       cbuf->evno,
			       cbuf->seqno,
			       globals->
			       chaos_q
			       [CHAOS_PTR_TO_NDX(odp_queue_context(from))].
			       name);
		odp_event_free(ev);
	}

	odp_schedule_release_ordered();

	for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
		if (CHAOS_DEBUG)
			printf("Destroying queue %s\n",
			       globals->chaos_q[i].name);
		rc = odp_queue_destroy(globals->chaos_q[i].handle);
		CU_ASSERT(rc == 0);
	}

	rc = odp_pool_destroy(pool);
	CU_ASSERT(rc == 0);
}
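
The chaos_buf layout and the CHAOS_PTR_TO_NDX()/CHAOS_NDX_TO_PTR() helpers used above are defined elsewhere in the test suite. A plausible minimal form, assuming the queue context simply carries the queue index as a pointer-sized integer (an illustrative sketch, not necessarily the suite's exact definitions):

typedef struct {
	uint64_t evno;   /* identity of the event */
	uint64_t seqno;  /* per-event sequence counter (assumed to be
			  * bumped by the worker threads) */
} chaos_buf;

/* Encode a small queue index directly in the opaque queue context pointer */
#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)(n))
#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uintptr_t)(p))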