Example #1
/* Spawn the thread if new work has arrived and no thread is up */
static void maybe_spawn_locked() {
  if (grpc_closure_list_empty(g_executor.closures) == 1) {
    return;
  }
  if (g_executor.shutting_down == 1) {
    return;
  }

  if (g_executor.busy != 0) {
    /* Thread still working. New work will be picked up by already running
     * thread. Not spawning anything. */
    return;
  } else if (g_executor.pending_join != 0) {
    /* Pick up the remains of the previous incarnation of the thread. */
    gpr_thd_join(g_executor.tid);
    g_executor.pending_join = 0;
  }

  /* All previous instances of the thread should have been joined at this point.
   * Spawn time! */
  g_executor.busy = 1;
  gpr_thd_new(&g_executor.tid, closure_exec_thread_func, NULL,
              &g_executor.options);
  g_executor.pending_join = 1;
}
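
For context, a minimal sketch of how a producer might drive maybe_spawn_locked, assuming a hypothetical push helper that appends work under the same mutex (the helper name and signature are illustrative, not taken from the listing above):

/* Hypothetical caller sketch: enqueue a closure, then let the lock-holding
 * helper decide whether the background thread must be (re)spawned. */
static void executor_push_sketch(grpc_closure *closure, grpc_error *error) {
  gpr_mu_lock(&g_executor.mu);
  grpc_closure_list_append(&g_executor.closures, closure, error);
  maybe_spawn_locked(); /* must be called with g_executor.mu held */
  gpr_mu_unlock(&g_executor.mu);
}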
Example #2
grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                              grpc_pollset_worker **worker_hdl,
                              gpr_timespec now, gpr_timespec deadline) {
  uint64_t timeout;
  GRPC_UV_ASSERT_SAME_THREAD();
  gpr_mu_unlock(&grpc_polling_mu);
  if (grpc_pollset_work_run_loop) {
    if (gpr_time_cmp(deadline, now) >= 0) {
      timeout = (uint64_t)gpr_time_to_millis(gpr_time_sub(deadline, now));
    } else {
      timeout = 0;
    }
    /* We special-case timeout=0 so that we don't bother with the timer when
       the loop won't block anyway */
    if (timeout > 0) {
      uv_timer_start(&pollset->timer, timer_run_cb, timeout, 0);
      /* Run until there is some I/O activity or the timer triggers. It doesn't
         matter which happens */
      uv_run(uv_default_loop(), UV_RUN_ONCE);
      uv_timer_stop(&pollset->timer);
    } else {
      uv_run(uv_default_loop(), UV_RUN_NOWAIT);
    }
  }
  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    grpc_exec_ctx_flush(exec_ctx);
  }
  gpr_mu_lock(&grpc_polling_mu);
  return GRPC_ERROR_NONE;
}
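
The timer_run_cb passed to uv_timer_start is not shown in this listing. Its only job in this pattern is to fire so that uv_run(..., UV_RUN_ONCE) stops blocking, so a plausible minimal definition (a sketch, not necessarily the exact upstream code) is an empty callback:

/* Waking the loop is the timer's whole purpose: UV_RUN_ONCE returns once any
 * callback has run, so the body can be empty. */
static void timer_run_cb(uv_timer_t *timer) { (void)timer; }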
Example #3
void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
                                      grpc_channel_stack *channel_stack,
                                      grpc_resolver *resolver) {
  /* post-construction initialization: set the transport setup pointer */
  grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&chand->mu);
  GPR_ASSERT(!chand->resolver);
  chand->resolver = resolver;
  GRPC_RESOLVER_REF(resolver, "channel");
  if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
      chand->exit_idle_when_lb_policy_arrives) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  gpr_mu_unlock(&chand->mu);
}
Example #4
int grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
  int did_something = 0;
  GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
  while (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    grpc_closure *c = exec_ctx->closure_list.head;
    exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
    while (c != NULL) {
      int success = (int)(c->final_data & 1);
      grpc_closure *next = (grpc_closure *)(c->final_data & ~(uintptr_t)1);
      did_something++;
      GPR_TIMER_BEGIN("grpc_exec_ctx_flush.cb", 0);
      c->cb(exec_ctx, c->cb_arg, success);
      GPR_TIMER_END("grpc_exec_ctx_flush.cb", 0);
      c = next;
    }
  }
  GPR_TIMER_END("grpc_exec_ctx_flush", 0);
  return did_something;
}
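
Example #4 depends on the low bit of final_data carrying the success flag while the remaining bits hold the next-closure pointer, which works because closures are aligned to at least two bytes. A hedged illustration of the producer side of that encoding (the helper name is invented for illustration):

/* Pack a next pointer and a success bit into one word; bit 0 is free as long
 * as grpc_closure objects have at least 2-byte alignment. */
static void pack_final_data(grpc_closure *c, grpc_closure *next, int success) {
  c->final_data = (uintptr_t)next | (uintptr_t)(success ? 1 : 0);
}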
Example #5
void grpc_executor_shutdown() {
  int pending_join;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_mu_lock(&g_executor.mu);
  pending_join = g_executor.pending_join;
  g_executor.shutting_down = 1;
  gpr_mu_unlock(&g_executor.mu);
  /* we can release the lock at this point despite the access to the closure
   * list below because we aren't accepting new work */

  /* Execute pending callbacks, some may be performing cleanups */
  grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
  if (pending_join) {
    gpr_thd_join(g_executor.tid);
  }
  gpr_mu_destroy(&g_executor.mu);
}
Example #6
/* thread body */
static void closure_exec_thread_func(void *ignored) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  while (1) {
    gpr_mu_lock(&g_executor.mu);
    if (g_executor.shutting_down != 0) {
      gpr_mu_unlock(&g_executor.mu);
      break;
    }
    if (grpc_closure_list_empty(g_executor.closures)) {
      /* no more work, time to die */
      GPR_ASSERT(g_executor.busy == 1);
      g_executor.busy = 0;
      gpr_mu_unlock(&g_executor.mu);
      break;
    } else {
      grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
    }
    gpr_mu_unlock(&g_executor.mu);
    grpc_exec_ctx_flush(&exec_ctx);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
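
Examples #1, #5 and #6 together imply the shape of the global executor state they share. A reconstruction of the fields they touch (a sketch assembled from the usage above, not the upstream declaration):

/* Global executor state as implied by the examples above. */
static struct {
  gpr_mu mu;                  /* guards every field below */
  gpr_thd_id tid;             /* id of the background thread, if any */
  gpr_thd_options options;    /* options passed to gpr_thd_new */
  int busy;                   /* 1 while the thread is draining closures */
  int shutting_down;          /* set once by grpc_executor_shutdown */
  int pending_join;           /* a finished thread still needs gpr_thd_join */
  grpc_closure_list closures; /* queued work */
} g_executor;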
Example #7
static void test_code(void) {
  /* iomgr.h */
  grpc_iomgr_init();
  grpc_iomgr_shutdown();

  /* closure.h */
  grpc_closure closure;
  closure.cb = NULL;
  closure.cb_arg = NULL;
  closure.next_data.scratch = 0;

  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  closure_list.head = NULL;
  closure_list.tail = NULL;

  grpc_closure_init(&closure, NULL, NULL);

  grpc_closure_create(NULL, NULL);

  grpc_closure_list_move(NULL, NULL);
  grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
  grpc_closure_list_empty(closure_list);

  /* exec_ctx.h */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);

  /* endpoint.h */
  grpc_endpoint endpoint;
  grpc_endpoint_vtable vtable = {grpc_endpoint_read,
                                 grpc_endpoint_write,
                                 grpc_endpoint_get_workqueue,
                                 grpc_endpoint_add_to_pollset,
                                 grpc_endpoint_add_to_pollset_set,
                                 grpc_endpoint_shutdown,
                                 grpc_endpoint_destroy,
                                 grpc_endpoint_get_resource_user,
                                 grpc_endpoint_get_peer};
  endpoint.vtable = &vtable;

  grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_get_peer(&endpoint);
  grpc_endpoint_write(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_shutdown(&exec_ctx, &endpoint);
  grpc_endpoint_destroy(&exec_ctx, &endpoint);
  grpc_endpoint_add_to_pollset(&exec_ctx, &endpoint, NULL);
  grpc_endpoint_add_to_pollset_set(&exec_ctx, &endpoint, NULL);

  /* executor.h */
  grpc_executor_init();
  grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
  grpc_executor_shutdown();

  /* pollset.h */
  grpc_pollset_size();
  grpc_pollset_init(NULL, NULL);
  grpc_pollset_shutdown(NULL, NULL, NULL);
  grpc_pollset_reset(NULL);
  grpc_pollset_destroy(NULL);
  GRPC_ERROR_UNREF(grpc_pollset_work(NULL, NULL, NULL,
                                     gpr_now(GPR_CLOCK_REALTIME),
                                     gpr_now(GPR_CLOCK_MONOTONIC)));
  GRPC_ERROR_UNREF(grpc_pollset_kick(NULL, NULL));
}
Example #8
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker **worker_hdl, gpr_timespec now,
                       gpr_timespec deadline) {
    grpc_pollset_worker worker;
    *worker_hdl = &worker;

    /* pollset->mu already held */
    int added_worker = 0;
    int locked = 1;
    int queued_work = 0;
    int keep_polling = 0;
    GPR_TIMER_BEGIN("grpc_pollset_work", 0);
    /* this must happen before we (potentially) drop pollset->mu */
    worker.next = worker.prev = NULL;
    worker.reevaluate_polling_on_wakeup = 0;
    if (pollset->local_wakeup_cache != NULL) {
        worker.wakeup_fd = pollset->local_wakeup_cache;
        pollset->local_wakeup_cache = worker.wakeup_fd->next;
    } else {
        worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
        grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
    }
    worker.kicked_specifically = 0;
    /* If there's work waiting for the pollset to be idle, and the
       pollset is idle, then do that work */
    if (!grpc_pollset_has_workers(pollset) &&
            !grpc_closure_list_empty(pollset->idle_jobs)) {
        GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
        grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
        goto done;
    }
    /* If we're shutting down then we don't execute any extended work */
    if (pollset->shutting_down) {
        GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
        goto done;
    }
    /* Give do_promote priority so we don't starve it out */
    if (pollset->in_flight_cbs) {
        GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
        gpr_mu_unlock(&pollset->mu);
        locked = 0;
        goto done;
    }
    /* Start polling, and keep doing so while we're being asked to
       re-evaluate our pollers (this allows poll() based pollers to
       ensure they don't miss wakeups) */
    keep_polling = 1;
    while (keep_polling) {
        keep_polling = 0;
        if (!pollset->kicked_without_pollers) {
            if (!added_worker) {
                push_front_worker(pollset, &worker);
                added_worker = 1;
                gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
            }
            gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
            GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
            pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
                                                   deadline, now);
            GPR_TIMER_END("maybe_work_and_unlock", 0);
            locked = 0;
            gpr_tls_set(&g_current_thread_poller, 0);
        } else {
            GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
            pollset->kicked_without_pollers = 0;
        }
        /* Finished execution - start cleaning up.
           Note that we may arrive here from outside the enclosing while() loop.
           In that case we won't loop, though, as we haven't added the worker to
           the worker list, which means nobody could ask us to re-evaluate
           polling. */
done:
        if (!locked) {
            queued_work |= grpc_exec_ctx_flush(exec_ctx);
            gpr_mu_lock(&pollset->mu);
            locked = 1;
        }
        /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
           GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
           a loop */
        if (worker.reevaluate_polling_on_wakeup) {
            worker.reevaluate_polling_on_wakeup = 0;
            pollset->kicked_without_pollers = 0;
            if (queued_work || worker.kicked_specifically) {
                /* If there's queued work on the list, then set the deadline to be
                   immediate so we get back out of the polling loop quickly */
                deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
            }
            keep_polling = 1;
        }
    }
    if (added_worker) {
        remove_worker(pollset, &worker);
        gpr_tls_set(&g_current_thread_worker, 0);
    }
    /* release wakeup fd to the local pool */
    worker.wakeup_fd->next = pollset->local_wakeup_cache;
    pollset->local_wakeup_cache = worker.wakeup_fd;
    /* check shutdown conditions */
    if (pollset->shutting_down) {
        if (grpc_pollset_has_workers(pollset)) {
            grpc_pollset_kick(pollset, NULL);
        } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
            pollset->called_shutdown = 1;
            gpr_mu_unlock(&pollset->mu);
            finish_shutdown(exec_ctx, pollset);
            grpc_exec_ctx_flush(exec_ctx);
            /* Continuing to access pollset here is safe -- it is the caller's
             * responsibility to not destroy when it has outstanding calls to
             * grpc_pollset_work.
             * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
            gpr_mu_lock(&pollset->mu);
        } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
            grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
            gpr_mu_unlock(&pollset->mu);
            grpc_exec_ctx_flush(exec_ctx);
            gpr_mu_lock(&pollset->mu);
        }
    }
    *worker_hdl = NULL;
    GPR_TIMER_END("grpc_pollset_work", 0);
}
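
Example #8 recycles wakeup fds through a per-pollset free list (pollset->local_wakeup_cache), touching a node's fd and next fields. The node type that usage implies looks roughly like this (a reconstruction from the code above, not the upstream header):

/* Singly linked free-list node caching an initialized wakeup fd. */
typedef struct grpc_cached_wakeup_fd {
  grpc_wakeup_fd fd;                  /* the underlying wakeup primitive */
  struct grpc_cached_wakeup_fd *next; /* next free node in the cache */
} grpc_cached_wakeup_fd;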
Example #9
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
    GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
    pollset->vtable->finish_shutdown(pollset);
    grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, true, NULL);
}
Example #10
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  /* pollset->mu already held */
  int added_worker = 0;
  int locked = 1;
  /* this must happen before we (potentially) drop pollset->mu */
  worker->next = worker->prev = NULL;
  /* TODO(ctiller): pool these */
  grpc_wakeup_fd_init(&worker->wakeup_fd);
  if (!grpc_pollset_has_workers(pollset) &&
      !grpc_closure_list_empty(pollset->idle_jobs)) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
    goto done;
  }
  if (grpc_alarm_check(exec_ctx, now, &deadline)) {
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  if (pollset->shutting_down) {
    goto done;
  }
  if (pollset->in_flight_cbs) {
    /* Give do_promote priority so we don't starve it out */
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  if (!pollset->kicked_without_pollers) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
    gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
    pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker, deadline,
                                           now);
    locked = 0;
    gpr_tls_set(&g_current_thread_poller, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!locked) {
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
    locked = 1;
  }
  grpc_wakeup_fd_destroy(&worker->wakeup_fd);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(exec_ctx, pollset);
      grpc_exec_ctx_flush(exec_ctx);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
      gpr_mu_unlock(&pollset->mu);
      grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
}
Example #11
static void test_code(void) {
  /* iomgr.h */
  grpc_iomgr_init();
  grpc_iomgr_shutdown();

  /* closure.h */
  grpc_closure closure;
  closure.cb = NULL;
  closure.cb_arg = NULL;
  closure.final_data = 0;

  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  closure_list.head = NULL;
  closure_list.tail = NULL;

  grpc_closure_init(&closure, NULL, NULL);

  grpc_closure_create(NULL, NULL);

  grpc_closure_list_move(NULL, NULL);
  grpc_closure_list_add(NULL, NULL, true);
  bool x = grpc_closure_list_empty(closure_list);
  grpc_closure_next(&closure);

  /* exec_ctx.h */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_exec_ctx_enqueue(&exec_ctx, &closure, x, NULL);
  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);

  /* endpoint.h */
  grpc_endpoint endpoint;
  grpc_endpoint_vtable vtable = {
      grpc_endpoint_read,           grpc_endpoint_write,
      grpc_endpoint_add_to_pollset, grpc_endpoint_add_to_pollset_set,
      grpc_endpoint_shutdown,       grpc_endpoint_destroy,
      grpc_endpoint_get_peer};
  endpoint.vtable = &vtable;

  grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_get_peer(&endpoint);
  grpc_endpoint_write(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_shutdown(&exec_ctx, &endpoint);
  grpc_endpoint_destroy(&exec_ctx, &endpoint);
  grpc_endpoint_add_to_pollset(&exec_ctx, &endpoint, NULL);
  grpc_endpoint_add_to_pollset_set(&exec_ctx, &endpoint, NULL);

  /* executor.h */
  grpc_executor_init();
  grpc_executor_enqueue(&closure, x);
  grpc_executor_shutdown();

  /* pollset.h */
  grpc_pollset_size();
  grpc_pollset_init(NULL, NULL);
  grpc_pollset_shutdown(NULL, NULL, NULL);
  grpc_pollset_reset(NULL);
  grpc_pollset_destroy(NULL);
  grpc_pollset_work(NULL, NULL, NULL, gpr_now(GPR_CLOCK_REALTIME),
                    gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_pollset_kick(NULL, NULL);
}
Example #12
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  int added_worker = 0;
  worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
      worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
          worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
              worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
  worker->kicked = 0;
  worker->pollset = pollset;
  gpr_cv_init(&worker->cv);
  if (grpc_timer_check(exec_ctx, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    if (g_active_poller == NULL) {
      grpc_pollset_worker *next_worker;
      /* become poller */
      pollset->is_iocp_worker = 1;
      g_active_poller = worker;
      gpr_mu_unlock(&grpc_polling_mu);
      grpc_iocp_work(exec_ctx, deadline);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&grpc_polling_mu);
      pollset->is_iocp_worker = 0;
      g_active_poller = NULL;
      /* try to get a worker from this pollset's worker list */
      next_worker = pop_front_worker(&pollset->root_worker,
                                     GRPC_POLLSET_WORKER_LINK_POLLSET);
      if (next_worker == NULL) {
        /* try to get a worker from the global list */
        next_worker = pop_front_worker(&g_global_root_worker,
                                       GRPC_POLLSET_WORKER_LINK_GLOBAL);
      }
      if (next_worker != NULL) {
        next_worker->kicked = 1;
        gpr_cv_signal(&next_worker->cv);
      }

      if (pollset->shutting_down && pollset->on_shutdown != NULL) {
        grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
        pollset->on_shutdown = NULL;
      }
      goto done;
    }
    push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
                      worker);
    push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
                      worker);
    added_worker = 1;
    while (!worker->kicked) {
      if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
        break;
      }
    }
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    gpr_mu_unlock(&grpc_polling_mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&grpc_polling_mu);
  }
  if (added_worker) {
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
  }
  gpr_cv_destroy(&worker->cv);
}