Example #1
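grpc_resource_user_alloc debits size bytes from a resource user's free pool under its mutex; if the pool goes negative, optional_on_done is parked on the on_allocated list and an allocation pass is scheduled on the quota's combiner, otherwise the callback is scheduled immediately.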
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user, size_t size,
                              grpc_closure *optional_on_done) {
  gpr_mu_lock(&resource_user->mu);
  ru_ref_by(resource_user, (gpr_atm)size);
  resource_user->free_pool -= (int64_t)size;
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
            resource_user->resource_quota->name, resource_user->name, size,
            resource_user->free_pool);
  }
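  /* Pool exhausted: park the callback on on_allocated and, if no allocation
     pass is already in flight, schedule one on the quota's combiner. */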
  if (resource_user->free_pool < 0) {
    grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
                             GRPC_ERROR_NONE);
    if (!resource_user->allocating) {
      resource_user->allocating = true;
      grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                            &resource_user->allocate_closure, GRPC_ERROR_NONE,
                            false);
    }
  } else {
    grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #2
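Appends a closure to be notified when TCP server shutdown begins; the list is mutated only while holding the server's mutex.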
void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
                                           grpc_closure *shutdown_starting) {
  gpr_mu_lock(&s->mu);
  grpc_closure_list_append(&s->shutdown_starting, shutdown_starting,
                           GRPC_ERROR_NONE);
  gpr_mu_unlock(&s->mu);
}
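The closures registered here fire when shutdown actually begins. A minimal sketch of that consuming side, using grpc_exec_ctx_enqueue_list as seen in Example #7 (fire_shutdown_starting is a hypothetical name, not gRPC's actual API):

/* Hypothetical sketch: draining a shutdown_starting list built with
 * grpc_closure_list_append. Scheduling the list empties it. */
static void fire_shutdown_starting(grpc_exec_ctx *exec_ctx,
                                   grpc_tcp_server *s) {
  gpr_mu_lock(&s->mu);
  /* Each closure runs with the error it was appended with
     (GRPC_ERROR_NONE here); the list is left empty. */
  grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
  gpr_mu_unlock(&s->mu);
}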
Example #3
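Queues a closure on the global executor unless it is shutting down, then calls maybe_spawn_locked so that a worker thread will drain the queue.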
void grpc_executor_push(grpc_closure *closure, grpc_error *error) {
  gpr_mu_lock(&g_executor.mu);
  if (g_executor.shutting_down == 0) {
    grpc_closure_list_append(&g_executor.closures, closure, error);
    maybe_spawn_locked();
  }
  gpr_mu_unlock(&g_executor.mu);
}
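grpc_executor_push only queues work; a background thread drains it. A minimal sketch of such a worker, assuming only the APIs visible elsewhere on this page (executor_thread_body is a hypothetical name, and this is not necessarily the upstream implementation):

/* Hypothetical worker loop for the executor in Example #3. */
static void executor_thread_body(void *ignored) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(&g_executor.mu);
  while (!grpc_closure_list_empty(g_executor.closures)) {
    /* Steal the whole list under the lock... */
    grpc_closure_list todo = GRPC_CLOSURE_LIST_INIT;
    grpc_closure_list_move(&g_executor.closures, &todo);
    gpr_mu_unlock(&g_executor.mu);
    /* ...and run it outside the lock: grpc_exec_ctx_enqueue_list
       (Example #7) hands every closure to the exec_ctx, and flushing
       invokes their callbacks. */
    grpc_exec_ctx_enqueue_list(&exec_ctx, &todo, NULL);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(&g_executor.mu);
  }
  gpr_mu_unlock(&g_executor.mu);
  grpc_exec_ctx_finish(&exec_ctx);
}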
Example #4
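A variant of grpc_resource_user_alloc that also tracks an allocated counter and fails optional_on_done immediately when the resource user has already been shut down.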
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user, size_t size,
                              grpc_closure *optional_on_done) {
  gpr_mu_lock(&resource_user->mu);
  grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
      &resource_user->on_done_destroy_closure);
  if (on_done_destroy != NULL) {
    /* already shutdown */
    if (grpc_resource_quota_trace) {
      gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR " after shutdown",
              resource_user->resource_quota->name, resource_user->name, size);
    }
    grpc_exec_ctx_sched(
        exec_ctx, optional_on_done,
        GRPC_ERROR_CREATE("Buffer pool user is already shutdown"), NULL);
    gpr_mu_unlock(&resource_user->mu);
    return;
  }
  resource_user->allocated += (int64_t)size;
  resource_user->free_pool -= (int64_t)size;
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; allocated -> %" PRId64
                       ", free_pool -> %" PRId64,
            resource_user->resource_quota->name, resource_user->name, size,
            resource_user->allocated, resource_user->free_pool);
  }
  if (resource_user->free_pool < 0) {
    grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
                             GRPC_ERROR_NONE);
    if (!resource_user->allocating) {
      resource_user->allocating = true;
      grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                            &resource_user->allocate_closure, GRPC_ERROR_NONE,
                            false);
    }
  } else {
    grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #5
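Constructor for the client channel's per-call data. If the resolver has already produced method parameters, the per-method timeout and wait_for_ready settings are applied right away; otherwise a read_service_config closure is appended to the channel's waiting_for_config_closures list to run once the resolver returns.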
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  // Initialize data members.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  calld->path = GRPC_MDSTR_REF(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
  calld->cancel_error = GRPC_ERROR_NONE;
  gpr_atm_rel_store(&calld->subchannel_call, 0);
  gpr_mu_init(&calld->mu);
  calld->connected_subchannel = NULL;
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  calld->owning_call = args->call_stack;
  calld->pollent = NULL;
  // If the resolver has already returned results, then we can access
  // the service config parameters immediately.  Otherwise, we need to
  // defer that work until the resolver returns an initial result.
  // TODO(roth): This code is almost but not quite identical to the code
  // in read_service_config() above.  It would be nice to find a way to
  // combine them, to avoid having to maintain it twice.
  gpr_mu_lock(&chand->mu);
  if (chand->lb_policy != NULL) {
    // We already have a resolver result, so check for service config.
    if (chand->method_params_table != NULL) {
      grpc_mdstr_hash_table *method_params_table =
          grpc_mdstr_hash_table_ref(chand->method_params_table);
      gpr_mu_unlock(&chand->mu);
      method_parameters *method_params =
          grpc_method_config_table_get(method_params_table, args->path);
      if (method_params != NULL) {
        if (gpr_time_cmp(method_params->timeout,
                         gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
          gpr_timespec per_method_deadline =
              gpr_time_add(calld->call_start_time, method_params->timeout);
          calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
        }
        if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
          calld->wait_for_ready_from_service_config =
              method_params->wait_for_ready;
        }
      }
      grpc_mdstr_hash_table_unref(method_params_table);
    } else {
      gpr_mu_unlock(&chand->mu);
    }
  } else {
    // We don't yet have a resolver result, so register a callback to
    // get the service config data once the resolver returns.
    // Take a reference to the call stack to be owned by the callback.
    GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
    grpc_closure_list_append(&chand->waiting_for_config_closures,
                             &calld->read_service_config, GRPC_ERROR_NONE);
    gpr_mu_unlock(&chand->mu);
  }
  // Start the deadline timer with the current deadline value.  If we
  // do not yet have service config data, then the timer may be reset
  // later.
  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
  return GRPC_ERROR_NONE;
}
Example #6
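Tries to pick a connected subchannel for a call. A NULL initial_metadata cancels any pending pick; with an LB policy in place the pick is delegated to grpc_lb_policy_pick; otherwise a continue_picking closure is queued until the resolver delivers a result.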
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            grpc_metadata_batch *initial_metadata,
                            uint32_t initial_metadata_flags,
                            grpc_connected_subchannel **connected_subchannel,
                            grpc_closure *on_ready, grpc_error *error) {
  GPR_TIMER_BEGIN("pick_subchannel", 0);

  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu);
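  /* A NULL initial_metadata signals cancellation of a pending pick rather
     than a new pick request. */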
  if (initial_metadata == NULL) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel, GRPC_ERROR_REF(error));
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = closure->next_data.next) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_sched(
            exec_ctx, cpa->on_ready,
            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
      }
    }
    gpr_mu_unlock(&chand->mu);
    GPR_TIMER_END("pick_subchannel", 0);
    GRPC_ERROR_UNREF(error);
    return true;
  }
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
    gpr_mu_unlock(&chand->mu);
    // If the application explicitly set wait_for_ready, use that.
    // Otherwise, if the service config specified a value for this
    // method, use that.
    const bool wait_for_ready_set_from_api =
        initial_metadata_flags &
        GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
    const bool wait_for_ready_set_from_service_config =
        calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
    if (!wait_for_ready_set_from_api &&
        wait_for_ready_set_from_service_config) {
      if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
        initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
      } else {
        initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
      }
    }
    // TODO(dgq): make this deadline configurable somehow.
    const grpc_lb_policy_pick_args inputs = {
        initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
        gpr_inf_future(GPR_CLOCK_MONOTONIC)};
    const bool result = grpc_lb_policy_pick(
        exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
    GPR_TIMER_END("pick_subchannel", 0);
    return result;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  if (chand->resolver != NULL) {
    cpa = gpr_malloc(sizeof(*cpa));
    cpa->initial_metadata = initial_metadata;
    cpa->initial_metadata_flags = initial_metadata_flags;
    cpa->connected_subchannel = connected_subchannel;
    cpa->on_ready = on_ready;
    cpa->elem = elem;
    grpc_closure_init(&cpa->closure, continue_picking, cpa);
    grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                             GRPC_ERROR_NONE);
  } else {
    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
                        NULL);
  }
  gpr_mu_unlock(&chand->mu);

  GPR_TIMER_END("pick_subchannel", 0);
  return false;
}
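The continue_picking callback that Example #6 queues is not shown on this page. A sketch of a plausible body, consistent with how the continue_picking_args fields are used above (a pending pick is cancelled by NULL-ing connected_subchannel); the actual upstream code may differ:

/* Sketch: resume a deferred pick once the resolver has delivered a result. */
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  continue_picking_args *cpa = arg;
  if (cpa->connected_subchannel == NULL) {
    /* The pick was cancelled while queued (see the NULL-ing loop above). */
  } else if (error != GRPC_ERROR_NONE) {
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
  } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
                             cpa->initial_metadata_flags,
                             cpa->connected_subchannel, cpa->on_ready,
                             GRPC_ERROR_NONE)) {
    /* Pick completed synchronously: run on_ready now. */
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
  }
  gpr_free(cpa);
}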
Example #7
File: iomgr.c  Project: gnirodi/grpc
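A test routine that exercises the iomgr surface, including grpc_closure_list_append, mostly with placeholder (NULL) arguments.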
static void test_code(void) {
  /* iomgr.h */
  grpc_iomgr_init();
  grpc_iomgr_shutdown();

  /* closure.h */
  grpc_closure closure;
  closure.cb = NULL;
  closure.cb_arg = NULL;
  closure.next_data.scratch = 0;

  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  closure_list.head = NULL;
  closure_list.tail = NULL;

  grpc_closure_init(&closure, NULL, NULL);

  grpc_closure_create(NULL, NULL);

  grpc_closure_list_move(NULL, NULL);
  grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
  grpc_closure_list_empty(closure_list);

  /* exec_ctx.h */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_exec_ctx_flush(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);

  /* endpoint.h */
  grpc_endpoint endpoint;
  grpc_endpoint_vtable vtable = {grpc_endpoint_read,
                                 grpc_endpoint_write,
                                 grpc_endpoint_get_workqueue,
                                 grpc_endpoint_add_to_pollset,
                                 grpc_endpoint_add_to_pollset_set,
                                 grpc_endpoint_shutdown,
                                 grpc_endpoint_destroy,
                                 grpc_endpoint_get_resource_user,
                                 grpc_endpoint_get_peer};
  endpoint.vtable = &vtable;

  grpc_endpoint_read(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_get_peer(&endpoint);
  grpc_endpoint_write(&exec_ctx, &endpoint, NULL, NULL);
  grpc_endpoint_shutdown(&exec_ctx, &endpoint);
  grpc_endpoint_destroy(&exec_ctx, &endpoint);
  grpc_endpoint_add_to_pollset(&exec_ctx, &endpoint, NULL);
  grpc_endpoint_add_to_pollset_set(&exec_ctx, &endpoint, NULL);

  /* executor.h */
  grpc_executor_init();
  grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
  grpc_executor_shutdown();

  /* pollset.h */
  grpc_pollset_size();
  grpc_pollset_init(NULL, NULL);
  grpc_pollset_shutdown(NULL, NULL, NULL);
  grpc_pollset_reset(NULL);
  grpc_pollset_destroy(NULL);
  GRPC_ERROR_UNREF(grpc_pollset_work(NULL, NULL, NULL,
                                     gpr_now(GPR_CLOCK_REALTIME),
                                     gpr_now(GPR_CLOCK_MONOTONIC)));
  GRPC_ERROR_UNREF(grpc_pollset_kick(NULL, NULL));
}
Example #8
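Another revision of pick_subchannel, without the grpc_error parameter or the service-config wait_for_ready handling seen in Example #6; the cancellation path and LB policy pick arguments differ accordingly.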
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            grpc_metadata_batch *initial_metadata,
                            uint32_t initial_metadata_flags,
                            grpc_connected_subchannel **connected_subchannel,
                            grpc_closure *on_ready) {
  GPR_TIMER_BEGIN("pick_subchannel", 0);

  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu);
  if (initial_metadata == NULL) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel);
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = closure->next_data.next) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_sched(exec_ctx, cpa->on_ready,
                            GRPC_ERROR_CREATE("Pick cancelled"), NULL);
      }
    }
    gpr_mu_unlock(&chand->mu);
    GPR_TIMER_END("pick_subchannel", 0);
    return true;
  }
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    int r;
    GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
    gpr_mu_unlock(&chand->mu);
    const grpc_lb_policy_pick_args inputs = {calld->pollent, initial_metadata,
                                             initial_metadata_flags,
                                             &calld->lb_token_mdelem};
    r = grpc_lb_policy_pick(exec_ctx, lb_policy, &inputs, connected_subchannel,
                            NULL, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
    GPR_TIMER_END("pick_subchannel", 0);
    return r;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  if (chand->resolver != NULL) {
    cpa = gpr_malloc(sizeof(*cpa));
    cpa->initial_metadata = initial_metadata;
    cpa->initial_metadata_flags = initial_metadata_flags;
    cpa->connected_subchannel = connected_subchannel;
    cpa->on_ready = on_ready;
    cpa->elem = elem;
    grpc_closure_init(&cpa->closure, continue_picking, cpa);
    grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                             GRPC_ERROR_NONE);
  } else {
    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
                        NULL);
  }
  gpr_mu_unlock(&chand->mu);

  GPR_TIMER_END("pick_subchannel", 0);
  return false;
}