/* Thread body: block until t->cancel is cancelled, then decrement t->n.
   The thread that brings t->n to zero signals t->done. */
static void thd_body(void *v) {
  struct test *tst = v;
  gpr_mu_lock(&tst->mu);
  /* gpr_cv_cancellable_wait() returns non-zero once the cancellable fires;
     loop until then (an infinite deadline means only cancellation wakes us
     for good). */
  while (!gpr_cv_cancellable_wait(&tst->cv, &tst->mu,
                                  gpr_inf_future(GPR_CLOCK_REALTIME),
                                  &tst->cancel)) {
  }
  if (--tst->n == 0) {
    gpr_event_set(&tst->done, (void *)1);
  }
  gpr_mu_unlock(&tst->mu);
}
/* Wait until *ev is set, abs_deadline passes, or *c is cancelled.
   Returns the event's value when set, otherwise NULL. */
void *gpr_event_cancellable_wait(gpr_event *ev, gpr_timespec abs_deadline,
                                 gpr_cancellable *c) {
  void *val = (void *)gpr_atm_acq_load(&ev->state);
  if (val != NULL) {
    return val; /* fast path: already set, no lock needed */
  }
  struct sync_array_s *s = hash(ev);
  gpr_mu_lock(&s->mu);
  /* Re-read under the lock each iteration; stop when the event is set or
     the cancellable wait reports timeout/cancellation. */
  do {
    val = (void *)gpr_atm_acq_load(&ev->state);
  } while (val == NULL &&
           !gpr_cv_cancellable_wait(&s->cv, &s->mu, abs_deadline, c));
  gpr_mu_unlock(&s->mu);
  return val;
}
/* Exercise gpr_cancellable: timed waits while uncancelled must time out
   normally, cancellation must wake all waiting threads, and waits entered
   after cancellation must return almost immediately. */
static void test(void) {
  int i;
  gpr_thd_id thd;
  struct test t;
  int n = 1;
  gpr_timespec interval;
  gpr_mu_init(&t.mu);
  gpr_cv_init(&t.cv);
  gpr_event_init(&t.ev);
  gpr_event_init(&t.done);
  gpr_cancellable_init(&t.cancel);

  /* A gpr_cancellable starts not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Test timeout on event wait for uncancelled gpr_cancellable.  The wait
     should last roughly the full 1s deadline; the 500ms..2s bounds are
     deliberately loose to tolerate scheduling jitter. */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);

  /* Test timeout on cv wait for uncancelled gpr_cancellable; same loose
     timing bounds as above. */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Create some threads.  They all wait until cancelled; the last to finish
     sets t.done. */
  t.n = n;
  for (i = 0; i != n; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
  }

  /* Check that t.cancel still is not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait a second, and check that no threads have finished waiting
     (t.n is still the full thread count). */
  gpr_mu_lock(&t.mu);
  gpr_cv_wait(&t.cv, &t.mu,
              gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                           gpr_time_from_micros(1000000, GPR_TIMESPAN)));
  GPR_ASSERT(t.n == n);
  gpr_mu_unlock(&t.mu);

  /* Check that t.cancel still is not cancelled, but when cancelled it
     reports that it is cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));
  gpr_cancellable_cancel(&t.cancel);
  GPR_ASSERT(gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait for threads to finish: cancellation should wake every waiter, so
     the counter must reach zero. */
  gpr_event_wait(&t.done, gpr_inf_future(GPR_CLOCK_REALTIME));
  GPR_ASSERT(t.n == 0);

  /* Test timeout on cv wait for cancelled gpr_cancellable: the wait should
     return almost immediately (well under the 100ms bound), ignoring the
     1s deadline. */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Test timeout on event wait for cancelled gpr_cancellable: should also
     return almost immediately. */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);

  gpr_mu_destroy(&t.mu);
  gpr_cv_destroy(&t.cv);
  gpr_cancellable_destroy(&t.cancel);
}