Example #1
int gpr_cv_cancellable_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline,
                            gpr_cancellable *c) {
  gpr_int32 timeout;
  gpr_mu_lock(&c->mu);
  timeout = gpr_cancellable_is_cancelled(c);
  if (!timeout) {
    struct gpr_cancellable_list_ le;
    le.mu = mu;
    le.cv = cv;
    le.next = c->waiters.next;
    le.prev = &c->waiters;
    le.next->prev = &le;
    le.prev->next = &le;
    gpr_mu_unlock(&c->mu);
    timeout = gpr_cv_wait(cv, mu, abs_deadline);
    gpr_mu_lock(&c->mu);
    le.next->prev = le.prev;
    le.prev->next = le.next;
    if (!timeout) {
      timeout = gpr_cancellable_is_cancelled(c);
    }
  }
  gpr_mu_unlock(&c->mu);
  return timeout;
}
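
Example #1 enqueues the waiter on the cancellable's list, drops c->mu, sleeps on the caller's condition variable, and unlinks itself afterwards; it returns non-zero on timeout or cancellation. Below is a minimal usage sketch, assuming the gpr support headers used by the examples are already included; the struct state type and its ready field are hypothetical names for illustration, not part of the library.

/* Hypothetical waiter state; only the gpr_* types come from the library. */
struct state {
  gpr_mu mu;
  gpr_cv cv;
  int ready; /* predicate set by some other thread */
  gpr_cancellable cancel;
};

/* Wait until st->ready is set, the deadline passes, or st->cancel is
   cancelled.  Returns the value of the predicate at the time we gave up. */
static int wait_until_ready(struct state *st, gpr_timespec deadline) {
  int ready;
  int gave_up = 0;
  gpr_mu_lock(&st->mu);
  while (!st->ready && !gave_up) {
    /* Non-zero means timeout or cancellation; zero means a normal wakeup,
       so re-check the predicate. */
    gave_up = gpr_cv_cancellable_wait(&st->cv, &st->mu, deadline, &st->cancel);
  }
  ready = st->ready;
  gpr_mu_unlock(&st->mu);
  return ready;
}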
Example #2
void gpr_cancellable_cancel(gpr_cancellable *c) {
  if (!gpr_cancellable_is_cancelled(c)) {
    int failures;
    int backoff = 1;
    do {
      struct gpr_cancellable_list_ *l;
      struct gpr_cancellable_list_ *nl;
      gpr_mu *omu = 0; /* one-element cache of a processed gpr_mu */
      gpr_cv *ocv = 0; /* one-element cache of a processed gpr_cv */
      gpr_mu_lock(&c->mu);
      gpr_atm_rel_store(&c->cancelled, 1);
      failures = 0;
      for (l = c->waiters.next; l != &c->waiters; l = nl) {
        nl = l->next;
        if (omu != l->mu) {
          omu = l->mu;
          if (gpr_mu_trylock(l->mu)) {
            gpr_mu_unlock(l->mu);
            l->next->prev = l->prev; /* remove *l from list */
            l->prev->next = l->next;
            /* allow unconditional dequeue in gpr_cv_cancellable_wait() */
            l->next = l;
            l->prev = l;
            ocv = 0; /* force broadcast */
          } else {
            failures++;
          }
        }
        if (ocv != l->cv) {
          ocv = l->cv;
          gpr_cv_broadcast(l->cv);
        }
      }
      gpr_mu_unlock(&c->mu);
      if (failures != 0) {
        if (backoff < 10) {
          volatile int i;
          for (i = 0; i != (1 << backoff); i++) {
          }
          backoff++;
        } else {
          gpr_event ev;
          gpr_event_init(&ev);
          gpr_event_wait(
              &ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                gpr_time_from_micros(1000, GPR_TIMESPAN)));
        }
      }
    } while (failures != 0);
  }
}
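
Example #2 marks the cancellable as cancelled and broadcasts every condition variable that has a registered waiter. It only gpr_mu_trylock's each waiter's mutex and retries with exponential backoff on failure: waiters take their own mutex before c->mu, while the canceller already holds c->mu, so blocking here could deadlock. A hedged sketch of the canceller side, reusing the hypothetical struct state from the previous sketch:

/* Hypothetical canceller: wakes any thread blocked in wait_until_ready()
   above.  gpr_cancellable_cancel is already a no-op once cancelled (see
   Example #2), so the explicit check only skips redundant work. */
static void abort_waiters(struct state *st) {
  if (!gpr_cancellable_is_cancelled(&st->cancel)) {
    gpr_cancellable_cancel(&st->cancel);
  }
}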
Example #3
static void test(void) {
  int i;
  gpr_thd_id thd;
  struct test t;
  int n = 1;
  gpr_timespec interval;

  gpr_mu_init(&t.mu);
  gpr_cv_init(&t.cv);
  gpr_event_init(&t.ev);
  gpr_event_init(&t.done);
  gpr_cancellable_init(&t.cancel);

  /* A gpr_cancellable starts not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Test timeout on event wait for uncancelled gpr_cancellable */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);

  /* Test timeout on cv wait for uncancelled gpr_cancellable */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Create some threads.  They all wait until cancelled; the last to finish
     sets t.done.  */
  t.n = n;
  for (i = 0; i != n; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
  }
  /* Check that t.cancel still is not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait a second, and check that no threads have finished waiting. */
  gpr_mu_lock(&t.mu);
  gpr_cv_wait(&t.cv, &t.mu,
              gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                           gpr_time_from_micros(1000000, GPR_TIMESPAN)));
  GPR_ASSERT(t.n == n);
  gpr_mu_unlock(&t.mu);

  /* Check that t.cancel still is not cancelled, but that once
     cancelled it reports that it is cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));
  gpr_cancellable_cancel(&t.cancel);
  GPR_ASSERT(gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait for threads to finish. */
  gpr_event_wait(&t.done, gpr_inf_future(GPR_CLOCK_REALTIME));
  GPR_ASSERT(t.n == 0);

  /* Test timeout on cv wait for cancelled gpr_cancellable */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Test timeout on event wait for cancelled gpr_cancellable */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);

  gpr_mu_destroy(&t.mu);
  gpr_cv_destroy(&t.cv);
  gpr_cancellable_destroy(&t.cancel);
}
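
Example #3 spawns n worker threads via thd_body (defined elsewhere in the test file) that all wait until the cancellable is cancelled; the last one to finish sets t.done. A hedged sketch of what such a worker might look like, using only calls that appear in the examples above; the real thd_body and struct test are not shown here, so the field names below are assumptions taken from test().

/* Illustrative worker, not the test's actual thd_body.  Blocks until
   t->cancel is cancelled, then decrements the thread count and sets
   t->done when it is the last one out. */
static void worker_body(void *v) {
  struct test *t = v;
  gpr_mu_lock(&t->mu);
  /* With an infinite deadline, a non-zero return can only mean cancellation. */
  while (!gpr_cv_cancellable_wait(&t->cv, &t->mu,
                                  gpr_inf_future(GPR_CLOCK_REALTIME),
                                  &t->cancel)) {
  }
  t->n--;
  if (t->n == 0) {
    gpr_event_set(&t->done, (void *)1);
  }
  gpr_mu_unlock(&t->mu);
}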