Example #1
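The snippets on this page are real call sites of grpc_exec_ctx_enqueue_list, apparently collected from several revisions of the gRPC C core. This first one looks like the resource-quota allocator: it pops resource users off the awaiting-allocation list, grants each request the quota's free pool can cover, and hands the user's on_allocated closures to the exec_ctx; a user that still cannot be satisfied is pushed back and the function reports false.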
/* returns true if all allocations are completed */
static bool rq_alloc(grpc_exec_ctx *exec_ctx,
                     grpc_resource_quota *resource_quota) {
  grpc_resource_user *resource_user;
  while ((resource_user = rulist_pop_head(resource_quota,
                                          GRPC_RULIST_AWAITING_ALLOCATION))) {
    gpr_mu_lock(&resource_user->mu);
    if (resource_user->free_pool < 0 &&
        -resource_user->free_pool <= resource_quota->free_pool) {
      int64_t amt = -resource_user->free_pool;
      resource_user->free_pool = 0;
      resource_quota->free_pool -= amt;
      if (grpc_resource_quota_trace) {
        gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
                           " bytes; rq_free_pool -> %" PRId64,
                resource_quota->name, resource_user->name, amt,
                resource_quota->free_pool);
      }
    } else if (grpc_resource_quota_trace && resource_user->free_pool >= 0) {
      gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request",
              resource_quota->name, resource_user->name);
    }
    if (resource_user->free_pool >= 0) {
      resource_user->allocating = false;
      grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
      gpr_mu_unlock(&resource_user->mu);
    } else {
      rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
      gpr_mu_unlock(&resource_user->mu);
      return false;
    }
  }
  return true;
}
Example #2
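A client-channel transport-op handler. On disconnect it shuts down the resolver and, if resolution never started, fails every closure still waiting for a config and schedules the whole (now-errored) list on the exec_ctx with a single grpc_exec_ctx_enqueue_list call.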
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = elem->channel_data;

  grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);

  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  gpr_mu_lock(&chand->mu);
  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      grpc_exec_ctx_sched(exec_ctx, op->send_ping,
                          GRPC_ERROR_CREATE("Ping with no load balancing"),
                          NULL);
    } else {
      grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != NULL) {
      set_channel_connectivity_state_locked(
          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        grpc_exec_ctx_enqueue_list(exec_ctx,
                                   &chand->waiting_for_config_closures, NULL);
      }
      if (chand->lb_policy != NULL) {
        grpc_pollset_set_del_pollset_set(exec_ctx,
                                         chand->lb_policy->interested_parties,
                                         chand->interested_parties);
        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
        chand->lb_policy = NULL;
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  gpr_mu_unlock(&chand->mu);
}
Example #3
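Dropping the last reference to a TCP server: the listeners are shut down, the shutdown_starting closures are queued on the caller's exec_ctx under the server lock, and the server is destroyed.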
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  if (gpr_unref(&s->refs)) {
    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
    gpr_mu_lock(&s->mu);
    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
    gpr_mu_unlock(&s->mu);
    tcp_server_destroy(exec_ctx, s);
  }
}
Example #4
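A pollset shutdown path, seemingly from an older POSIX pollset implementation (the caller apparently already holds pollset->mu, since it is never taken here). Once no workers remain, the pending idle_jobs are queued for execution, and shutdown completes immediately when nothing is in flight.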
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = 1;
  pollset->shutdown_done = closure;
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  if (!grpc_pollset_has_workers(pollset)) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
  }
  if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
      !grpc_pollset_has_workers(pollset)) {
    pollset->called_shutdown = 1;
    finish_shutdown(exec_ctx, pollset);
  }
}
Example #5
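A variant of Example #3 that tolerates a NULL exec_ctx: the shutdown_starting closures are queued on a local exec_ctx and run (via flush or finish) before the server is destroyed on whichever context is available.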
void grpc_tcp_server_unref(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  if (gpr_unref(&s->refs)) {
    /* Complete shutdown_starting work before destroying. */
    grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
    if (exec_ctx == NULL) {
      grpc_exec_ctx_flush(&local_exec_ctx);
      tcp_server_destroy(&local_exec_ctx, s);
      grpc_exec_ctx_finish(&local_exec_ctx);
    } else {
      grpc_exec_ctx_finish(&local_exec_ctx);
      tcp_server_destroy(exec_ctx, s);
    }
  }
}
Example #6
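Executor shutdown: after the shutting_down flag is set (so no new work is accepted), the remaining closures are drained through a temporary exec_ctx, the background thread is joined if it was ever spawned, and the mutex is destroyed.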
void grpc_executor_shutdown() {
  int pending_join;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_mu_lock(&g_executor.mu);
  pending_join = g_executor.pending_join;
  g_executor.shutting_down = 1;
  gpr_mu_unlock(&g_executor.mu);
  /* we can release the lock at this point despite the access to the closure
   * list below because we aren't accepting new work */

  /* Execute pending callbacks, some may be performing cleanups */
  grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
  if (pending_join) {
    gpr_thd_join(g_executor.tid);
  }
  gpr_mu_destroy(&g_executor.mu);
}
Example #7
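The executor's background thread. Under the lock it either observes shutdown, observes an empty closure list and marks itself idle, or moves the entire list onto its exec_ctx; the closures then actually run in grpc_exec_ctx_flush after the lock is dropped.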
/* thread body */
static void closure_exec_thread_func(void *ignored) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  while (1) {
    gpr_mu_lock(&g_executor.mu);
    if (g_executor.shutting_down != 0) {
      gpr_mu_unlock(&g_executor.mu);
      break;
    }
    if (grpc_closure_list_empty(g_executor.closures)) {
      /* no more work, time to die */
      GPR_ASSERT(g_executor.busy == 1);
      g_executor.busy = 0;
      gpr_mu_unlock(&g_executor.mu);
      break;
    } else {
      grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
    }
    gpr_mu_unlock(&g_executor.mu);
    grpc_exec_ctx_flush(&exec_ctx);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #8
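Another revision of pollset shutdown. Note the two-argument grpc_exec_ctx_enqueue_list: this snippet appears to predate the trailing workqueue parameter used in most of the other examples.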
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  int call_shutdown = 0;
  gpr_mu_lock(&pollset->mu);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = 1;
  if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
      !grpc_pollset_has_workers(pollset)) {
    pollset->called_shutdown = 1;
    call_shutdown = 1;
  }
  if (!grpc_pollset_has_workers(pollset)) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
  }
  pollset->shutdown_done = closure;
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  gpr_mu_unlock(&pollset->mu);

  if (call_shutdown) {
    finish_shutdown(exec_ctx, pollset);
  }
}
Example #9
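The client channel's on-resolver-result callback. It picks an LB policy name (forcing grpclb when the resolver returned only balancer addresses), instantiates the policy, swaps it in under the channel mutex, and then either releases the closures waiting for a config with grpc_exec_ctx_enqueue_list or fails them all when the channel has been disconnected.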
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_mdstr_hash_table *method_params_table = NULL;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  bool exit_idle = false;
  grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");

  if (chand->resolver_result != NULL) {
    grpc_lb_policy_args lb_policy_args;
    lb_policy_args.args = chand->resolver_result;
    lb_policy_args.client_channel_factory = chand->client_channel_factory;

    // Find LB policy name.
    const char *lb_policy_name = NULL;
    const grpc_arg *channel_arg =
        grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_POLICY_NAME);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
      lb_policy_name = channel_arg->value.string;
    }
    // Special case: If all of the addresses are balancer addresses,
    // assume that we should use the grpclb policy, regardless of what the
    // resolver actually specified.
    channel_arg =
        grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_LB_ADDRESSES);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
      grpc_lb_addresses *addresses = channel_arg->value.pointer.p;
      bool found_backend_address = false;
      for (size_t i = 0; i < addresses->num_addresses; ++i) {
        if (!addresses->addresses[i].is_balancer) {
          found_backend_address = true;
          break;
        }
      }
      if (!found_backend_address) {
        if (lb_policy_name != NULL && strcmp(lb_policy_name, "grpclb") != 0) {
          gpr_log(GPR_INFO,
                  "resolver requested LB policy %s but provided only balancer "
                  "addresses, no backend addresses -- forcing use of grpclb LB "
                  "policy",
                  lb_policy_name);
        }
        lb_policy_name = "grpclb";
      }
    }
    // Use pick_first if nothing was specified and we didn't select grpclb
    // above.
    if (lb_policy_name == NULL) lb_policy_name = "pick_first";

    lb_policy =
        grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      GRPC_ERROR_UNREF(state_error);
      state =
          grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
    }
    channel_arg =
        grpc_channel_args_find(lb_policy_args.args, GRPC_ARG_SERVICE_CONFIG);
    if (channel_arg != NULL) {
      GPR_ASSERT(channel_arg->type == GRPC_ARG_POINTER);
      method_params_table = grpc_method_config_table_convert(
          (grpc_method_config_table *)channel_arg->value.pointer.p,
          method_config_convert_value, &method_parameters_vtable);
    }
    grpc_channel_args_destroy(chand->resolver_result);
    chand->resolver_result = NULL;
  }

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (chand->method_params_table != NULL) {
    grpc_mdstr_hash_table_unref(chand->method_params_table);
  }
  chand->method_params_table = method_params_table;
  if (lb_policy != NULL) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(
        &chand->waiting_for_config_closures,
        GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = true;
    chand->exit_idle_when_lb_policy_arrives = false;
  }

  if (error == GRPC_ERROR_NONE && chand->resolver) {
    set_channel_connectivity_state_locked(
        exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
    gpr_mu_unlock(&chand->mu);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_error *refs[] = {error, state_error};
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                      GPR_ARRAY_SIZE(refs)),
        "resolver_gone");
    gpr_mu_unlock(&chand->mu);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
  GRPC_ERROR_UNREF(state_error);
}
Example #10
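A smoke test that exercises the iomgr API surface: closures, exec_ctx (including grpc_exec_ctx_enqueue_list on an empty list), endpoints, the executor, and pollsets.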
static void test_code(void) {
  /* iomgr.h */
  grpc_iomgr_init();
  grpc_iomgr_shutdown();

  /* closure.h */
  grpc_closure closure;
  closure.cb = NULL;
  closure.cb_arg = NULL;
  closure.next_data.scratch = 0;

  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  closure_list.head = NULL;
  closure_list.tail = NULL;

  grpc_closure_init(&closure, NULL, NULL);

  grpc_closure_create(NULL, NULL);

  grpc_closure_list_move(NULL, NULL);
  grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
  grpc_closure_list_empty(closure_list);

  /* exec_ctx.h */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);

  /* endpoint.h */
  grpc_endpoint endpoint;
  grpc_endpoint_vtable vtable = {grpc_endpoint_read,
                                 grpc_endpoint_write,
                                 grpc_endpoint_get_workqueue,
                                 grpc_endpoint_add_to_pollset,
                                 grpc_endpoint_add_to_pollset_set,
                                 grpc_endpoint_shutdown,
                                 grpc_endpoint_destroy,
                                 grpc_endpoint_get_resource_user,
                                 grpc_endpoint_get_peer};
  endpoint.vtable = &vtable;

  grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_get_peer(&endpoint);
  grpc_endpoint_write(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_shutdown(&exec_ctx, &endpoint);
  grpc_endpoint_destroy(&exec_ctx, &endpoint);
  grpc_endpoint_add_to_pollset(&exec_ctx, &endpoint, NULL);
  grpc_endpoint_add_to_pollset_set(&exec_ctx, &endpoint, NULL);

  /* executor.h */
  grpc_executor_init();
  grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
  grpc_executor_shutdown();

  /* pollset.h */
  grpc_pollset_size();
  grpc_pollset_init(NULL, NULL);
  grpc_pollset_shutdown(NULL, NULL, NULL);
  grpc_pollset_reset(NULL);
  grpc_pollset_destroy(NULL);
  GRPC_ERROR_UNREF(grpc_pollset_work(NULL, NULL, NULL,
                                     gpr_now(GPR_CLOCK_REALTIME),
                                     gpr_now(GPR_CLOCK_MONOTONIC)));
  GRPC_ERROR_UNREF(grpc_pollset_kick(NULL, NULL));
}
Example #11
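An earlier revision of the resolver callback from Example #9: the LB policy comes straight from the resolver result instead of being constructed from channel args, but waiting_for_config_closures is drained (or failed) the same way.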
static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  bool exit_idle = false;
  grpc_error *state_error = GRPC_ERROR_CREATE("No load balancing policy");

  if (chand->resolver_result != NULL) {
    lb_policy = grpc_resolver_result_get_lb_policy(chand->resolver_result);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "channel");
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      GRPC_ERROR_UNREF(state_error);
      state =
          grpc_lb_policy_check_connectivity(exec_ctx, lb_policy, &state_error);
    }

    grpc_resolver_result_unref(exec_ctx, chand->resolver_result);
  }

  chand->resolver_result = NULL;

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (lb_policy != NULL) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(
        &chand->waiting_for_config_closures,
        GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = true;
    chand->exit_idle_when_lb_policy_arrives = false;
  }

  if (error == GRPC_ERROR_NONE && chand->resolver) {
    set_channel_connectivity_state_locked(
        exec_ctx, chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
    gpr_mu_unlock(&chand->mu);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_error *refs[] = {error, state_error};
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
        GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                      GPR_ARRAY_SIZE(refs)),
        "resolver_gone");
    gpr_mu_unlock(&chand->mu);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
  GRPC_ERROR_UNREF(state_error);
}
Example #12
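The POSIX grpc_pollset_work loop. grpc_exec_ctx_enqueue_list shows up twice: to run idle_jobs when the pollset has no workers, and on the shutdown path to flush any idle_jobs that remain before the pollset is torn down.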
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker **worker_hdl, gpr_timespec now,
                       gpr_timespec deadline) {
  grpc_pollset_worker worker;
  *worker_hdl = &worker;

  /* pollset->mu already held */
  int added_worker = 0;
  int locked = 1;
  int queued_work = 0;
  int keep_polling = 0;
  GPR_TIMER_BEGIN("grpc_pollset_work", 0);
  /* this must happen before we (potentially) drop pollset->mu */
  worker.next = worker.prev = NULL;
  worker.reevaluate_polling_on_wakeup = 0;
  if (pollset->local_wakeup_cache != NULL) {
    worker.wakeup_fd = pollset->local_wakeup_cache;
    pollset->local_wakeup_cache = worker.wakeup_fd->next;
  } else {
    worker.wakeup_fd = gpr_malloc(sizeof(*worker.wakeup_fd));
    grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
  }
  worker.kicked_specifically = 0;
  /* If there's work waiting for the pollset to be idle, and the
     pollset is idle, then do that work */
  if (!grpc_pollset_has_workers(pollset) &&
      !grpc_closure_list_empty(pollset->idle_jobs)) {
    GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
    goto done;
  }
  /* If we're shutting down then we don't execute any extended work */
  if (pollset->shutting_down) {
    GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
    goto done;
  }
  /* Give do_promote priority so we don't starve it out */
  if (pollset->in_flight_cbs) {
    GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  /* Start polling, and keep doing so while we're being asked to
     re-evaluate our pollers (this allows poll() based pollers to
     ensure they don't miss wakeups) */
  keep_polling = 1;
  while (keep_polling) {
    keep_polling = 0;
    if (!pollset->kicked_without_pollers) {
      if (!added_worker) {
        push_front_worker(pollset, &worker);
        added_worker = 1;
        gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
      }
      gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
      GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
      pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, &worker,
                                             deadline, now);
      GPR_TIMER_END("maybe_work_and_unlock", 0);
      locked = 0;
      gpr_tls_set(&g_current_thread_poller, 0);
    } else {
      GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
      pollset->kicked_without_pollers = 0;
    }
    /* Finished execution - start cleaning up.
       Note that we may arrive here from outside the enclosing while() loop.
       In that case we won't loop though as we haven't added worker to the
       worker list, which means nobody could ask us to re-evaluate polling. */
  done:
    if (!locked) {
      queued_work |= grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
      locked = 1;
    }
    /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
       GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
       a loop */
    if (worker.reevaluate_polling_on_wakeup) {
      worker.reevaluate_polling_on_wakeup = 0;
      pollset->kicked_without_pollers = 0;
      if (queued_work || worker.kicked_specifically) {
        /* If there's queued work on the list, then set the deadline to be
           immediate so we get back out of the polling loop quickly */
        deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
      }
      keep_polling = 1;
    }
  }
  if (added_worker) {
    remove_worker(pollset, &worker);
    gpr_tls_set(&g_current_thread_worker, 0);
  }
  /* release wakeup fd to the local pool */
  worker.wakeup_fd->next = pollset->local_wakeup_cache;
  pollset->local_wakeup_cache = worker.wakeup_fd;
  /* check shutdown conditions */
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(exec_ctx, pollset);
      grpc_exec_ctx_flush(exec_ctx);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
      grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
      gpr_mu_unlock(&pollset->mu);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
  *worker_hdl = NULL;
  GPR_TIMER_END("grpc_pollset_work", 0);
}
Example #13
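An older client-channel config callback, apparently predating grpc_error (hence the bool iomgr_success parameter). When an LB policy arrives or the channel is disconnected, the closures parked in waiting_for_config_closures are released in a single batch.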
static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                 bool iomgr_success) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  int exit_idle = 0;

  if (chand->incoming_configuration != NULL) {
    lb_policy = grpc_client_config_get_lb_policy(chand->incoming_configuration);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "channel");
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      state = grpc_lb_policy_check_connectivity(exec_ctx, lb_policy);
    }

    grpc_client_config_unref(exec_ctx, chand->incoming_configuration);
  }

  chand->incoming_configuration = NULL;

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu_config);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = 1;
    chand->exit_idle_when_lb_policy_arrives = 0;
  }

  if (iomgr_success && chand->resolver) {
    set_channel_connectivity_state_locked(exec_ctx, chand, state,
                                          "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver,
                       &chand->incoming_configuration,
                       &chand->on_config_changed);
    gpr_mu_unlock(&chand->mu_config);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_FATAL_FAILURE, "resolver_gone");
    gpr_mu_unlock(&chand->mu_config);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
Example #14
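An earlier grpc_pollset_work, again using the two-argument grpc_exec_ctx_enqueue_list, with a wakeup fd created and destroyed per call rather than drawn from a per-pollset cache.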
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  /* pollset->mu already held */
  int added_worker = 0;
  int locked = 1;
  /* this must happen before we (potentially) drop pollset->mu */
  worker->next = worker->prev = NULL;
  /* TODO(ctiller): pool these */
  grpc_wakeup_fd_init(&worker->wakeup_fd);
  if (!grpc_pollset_has_workers(pollset) &&
      !grpc_closure_list_empty(pollset->idle_jobs)) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
    goto done;
  }
  if (grpc_alarm_check(exec_ctx, now, &deadline)) {
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  if (pollset->shutting_down) {
    goto done;
  }
  if (pollset->in_flight_cbs) {
    /* Give do_promote priority so we don't starve it out */
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  if (!pollset->kicked_without_pollers) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
    gpr_tls_set(&g_current_thread_worker, (gpr_intptr)worker);
    pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker, deadline,
                                           now);
    locked = 0;
    gpr_tls_set(&g_current_thread_poller, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!locked) {
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
    locked = 1;
  }
  grpc_wakeup_fd_destroy(&worker->wakeup_fd);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(exec_ctx, pollset);
      grpc_exec_ctx_flush(exec_ctx);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
      gpr_mu_unlock(&pollset->mu);
      grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
}
Example #15
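The older counterpart of the smoke test in Example #10, written against the API of its day (grpc_exec_ctx_enqueue, grpc_closure_list_add, grpc_executor_enqueue).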
static void test_code(void) {
  /* iomgr.h */
  grpc_iomgr_init();
  grpc_iomgr_shutdown();

  /* closure.h */
  grpc_closure closure;
  closure.cb = NULL;
  closure.cb_arg = NULL;
  closure.final_data = 0;

  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  closure_list.head = NULL;
  closure_list.tail = NULL;

  grpc_closure_init(&closure, NULL, NULL);

  grpc_closure_create(NULL, NULL);

  grpc_closure_list_move(NULL, NULL);
  grpc_closure_list_add(NULL, NULL, true);
  bool x = grpc_closure_list_empty(closure_list);
  grpc_closure_next(&closure);

  /* exec_ctx.h */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_exec_ctx_enqueue(&exec_ctx, &closure, x, NULL);
  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);

  /* endpoint.h */
  grpc_endpoint endpoint;
  grpc_endpoint_vtable vtable = {
      grpc_endpoint_read,           grpc_endpoint_write,
      grpc_endpoint_add_to_pollset, grpc_endpoint_add_to_pollset_set,
      grpc_endpoint_shutdown,       grpc_endpoint_destroy,
      grpc_endpoint_get_peer};
  endpoint.vtable = &vtable;

  grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_get_peer(&endpoint);
  grpc_endpoint_write(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_shutdown(&exec_ctx, &endpoint);
  grpc_endpoint_destroy(&exec_ctx, &endpoint);
  grpc_endpoint_add_to_pollset(&exec_ctx, &endpoint, NULL);
  grpc_endpoint_add_to_pollset_set(&exec_ctx, &endpoint, NULL);

  /* executor.h */
  grpc_executor_init();
  grpc_executor_enqueue(&closure, x);
  grpc_executor_shutdown();

  /* pollset.h */
  grpc_pollset_size();
  grpc_pollset_init(NULL, NULL);
  grpc_pollset_shutdown(NULL, NULL, NULL);
  grpc_pollset_reset(NULL);
  grpc_pollset_destroy(NULL);
  grpc_pollset_work(NULL, NULL, NULL, gpr_now(GPR_CLOCK_REALTIME),
                    gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_pollset_kick(NULL, NULL);
}
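The pattern these call sites share: take a lock, splice an entire grpc_closure_list onto an exec_ctx with grpc_exec_ctx_enqueue_list, release the lock, and only then let the closures run when the exec_ctx is flushed or finished. A minimal sketch of that pattern, assuming the three-argument API used in most examples above; drain_closures_locked is a hypothetical helper, and the include paths follow the historical gRPC tree layout:

#include <grpc/support/sync.h>

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

/* Hypothetical helper: hand every closure queued on |list| to an
 * exec_ctx without running user code while |mu| is held. */
static void drain_closures_locked(gpr_mu *mu, grpc_closure_list *list) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(mu);
  /* Splices the list onto the exec_ctx's run queue; nothing executes yet. */
  grpc_exec_ctx_enqueue_list(&exec_ctx, list, NULL);
  gpr_mu_unlock(mu);
  /* Runs the queued closures now that the lock has been released. */
  grpc_exec_ctx_finish(&exec_ctx);
}

Deferring execution this way is what keeps the lock ordering safe: a closure is free to re-acquire the same mutex (as several of the shutdown paths above do) because it never runs inside the critical section that enqueued it.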