Example #1
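An early revision of grpc_call_create from gRPC's C core: the grpc_call header and its channel-defined call stack are carved out of a single gpr_malloc block, the mutex and I/O-request slots are initialized, the client/server role is derived from server_transport_data, and the internal refcount starts at 2 (one ref dropped on destroy, one when the stream closes).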
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
                            const void *server_transport_data) {
  size_t i;
  grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
  grpc_call *call =
      gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
  memset(call, 0, sizeof(grpc_call));
  gpr_mu_init(&call->mu);
  call->channel = channel;
  call->cq = cq;
  call->is_client = server_transport_data == NULL;
  for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
    call->request_set[i] = REQSET_EMPTY;
  }
  if (call->is_client) {
    call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
    call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
  }
  grpc_channel_internal_ref(channel);
  call->metadata_context = grpc_channel_get_metadata_context(channel);
  /* one ref is dropped in response to destroy, the other in
     stream_closed */
  gpr_ref_init(&call->internal_refcount, 2);
  grpc_call_stack_init(channel_stack, server_transport_data,
                       CALL_STACK_FROM_CALL(call));
  return call;
}
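Every example here pivots on CALL_STACK_FROM_CALL, which is not part of the excerpts. Below is a minimal sketch of how it is conventionally defined in call.c, assuming exactly the layout allocated above: the call stack living immediately after the grpc_call struct in the same block.

/* Sketch only: relies on the single-allocation layout shown in Example #1,
   where channel_stack->call_stack_size bytes follow the grpc_call header. */
#define CALL_STACK_FROM_CALL(call) ((grpc_call_stack *)((call) + 1))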
Example #2
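A matching destroy_call callback from the same era: it tears down the call stack, drops the channel ref, destroys the mutex, unrefs status details and owned/buffered/initial metadata, runs any per-context destroy hooks, and destroys the send/recv op buffers, incoming queue, and incoming slice buffer before freeing the call itself.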
static void destroy_call(void *call, int ignored_success) {
  size_t i;
  grpc_call *c = call;
  grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c));
  grpc_channel_internal_unref(c->channel);
  gpr_mu_destroy(&c->mu);
  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
    if (c->status[i].details) {
      grpc_mdstr_unref(c->status[i].details);
    }
  }
  for (i = 0; i < c->owned_metadata_count; i++) {
    grpc_mdelem_unref(c->owned_metadata[i]);
  }
  gpr_free(c->owned_metadata);
  for (i = 0; i < GPR_ARRAY_SIZE(c->buffered_metadata); i++) {
    gpr_free(c->buffered_metadata[i].metadata);
  }
  for (i = 0; i < c->send_initial_metadata_count; i++) {
    grpc_mdelem_unref(c->send_initial_metadata[i].md);
  }
  for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
    if (c->destroy_context[i]) {
      c->destroy_context[i](c->context[i]);
    }
  }
  grpc_sopb_destroy(&c->send_ops);
  grpc_sopb_destroy(&c->recv_ops);
  grpc_bbq_destroy(&c->incoming_queue);
  gpr_slice_buffer_destroy(&c->incoming_message);
  gpr_free(c);
}
Example #3
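A later destroy_call that threads a grpc_exec_ctx through teardown and uses the refcount-debug macros (GRPC_CHANNEL_INTERNAL_UNREF, GRPC_MDSTR_UNREF, GRPC_MDELEM_UNREF): it destroys the receive-side metadata batches and any pending byte stream, releases the bound completion queue, and brackets the work with GPR_TIMER markers.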
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, int success) {
  size_t i;
  int ii;
  grpc_call *c = call;
  GPR_TIMER_BEGIN("destroy_call", 0);
  for (i = 0; i < 2; i++) {
    grpc_metadata_batch_destroy(
        &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
  }
  if (c->receiving_stream != NULL) {
    grpc_byte_stream_destroy(c->receiving_stream);
  }
  grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
  GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
  gpr_mu_destroy(&c->mu);
  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
    if (c->status[i].details) {
      GRPC_MDSTR_UNREF(c->status[i].details);
    }
  }
  for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
    GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
  }
  for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
    if (c->context[i].destroy) {
      c->context[i].destroy(c->context[i].value);
    }
  }
  if (c->cq) {
    GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
  }
  gpr_free(c);
  GPR_TIMER_END("destroy_call", 0);
}
Example #4
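grpc_call_set_completion_queue binds a completion queue to an existing call: it asserts a non-NULL cq, takes a "bind" ref on it, and points the call stack's pollset at the queue's pollset.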
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
                                    grpc_completion_queue *cq) {
  GPR_ASSERT(cq);
  call->cq = cq;
  GRPC_CQ_INTERNAL_REF(cq, "bind");
  grpc_call_stack_set_pollset(exec_ctx, CALL_STACK_FROM_CALL(call),
                              grpc_cq_pollset(cq));
}
Example #5
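An intermediate revision of grpc_call_create that also accepts caller-supplied initial metadata and a send deadline. It initializes the send/recv stream-op buffers and the incoming-message slice buffer, starts with a single internal ref, and, for server calls, immediately issues a receive transport op so initial metadata can arrive (the "server hack" noted in the comment); a finite deadline arms the deadline alarm.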
grpc_call *grpc_call_create(grpc_channel *channel, grpc_completion_queue *cq,
                            const void *server_transport_data,
                            grpc_mdelem **add_initial_metadata,
                            size_t add_initial_metadata_count,
                            gpr_timespec send_deadline) {
  size_t i;
  grpc_transport_op initial_op;
  grpc_transport_op *initial_op_ptr = NULL;
  grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
  grpc_call *call =
      gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
  memset(call, 0, sizeof(grpc_call));
  gpr_mu_init(&call->mu);
  call->channel = channel;
  call->cq = cq;
  call->is_client = server_transport_data == NULL;
  for (i = 0; i < GRPC_IOREQ_OP_COUNT; i++) {
    call->request_set[i] = REQSET_EMPTY;
  }
  if (call->is_client) {
    call->request_set[GRPC_IOREQ_SEND_TRAILING_METADATA] = REQSET_DONE;
    call->request_set[GRPC_IOREQ_SEND_STATUS] = REQSET_DONE;
  }
  GPR_ASSERT(add_initial_metadata_count < MAX_SEND_INITIAL_METADATA_COUNT);
  for (i = 0; i < add_initial_metadata_count; i++) {
    call->send_initial_metadata[i].md = add_initial_metadata[i];
  }
  call->send_initial_metadata_count = add_initial_metadata_count;
  call->send_deadline = send_deadline;
  grpc_channel_internal_ref(channel);
  call->metadata_context = grpc_channel_get_metadata_context(channel);
  grpc_sopb_init(&call->send_ops);
  grpc_sopb_init(&call->recv_ops);
  gpr_slice_buffer_init(&call->incoming_message);
  /* dropped in destroy */
  gpr_ref_init(&call->internal_refcount, 1);
  /* server hack: start reads immediately so we can get initial metadata.
     TODO(ctiller): figure out a cleaner solution */
  if (!call->is_client) {
    memset(&initial_op, 0, sizeof(initial_op));
    initial_op.recv_ops = &call->recv_ops;
    initial_op.recv_state = &call->recv_state;
    initial_op.on_done_recv = call_on_done_recv;
    initial_op.recv_user_data = call;
    initial_op.context = call->context;
    call->receiving = 1;
    GRPC_CALL_INTERNAL_REF(call, "receiving");
    initial_op_ptr = &initial_op;
  }
  grpc_call_stack_init(channel_stack, server_transport_data, initial_op_ptr,
                       CALL_STACK_FROM_CALL(call));
  if (gpr_time_cmp(send_deadline, gpr_inf_future) != 0) {
    set_deadline_alarm(call, send_deadline);
  }
  return call;
}
Example #6
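Another destroy_call variant from the same lineage as Example #2; the notable difference is the extra teardown of legacy_state when it is present.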
static void destroy_call(void *call, int ignored_success) {
  size_t i;
  grpc_call *c = call;
  grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c));
  grpc_channel_internal_unref(c->channel);
  gpr_mu_destroy(&c->mu);
  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
    if (c->status[i].details) {
      grpc_mdstr_unref(c->status[i].details);
    }
  }
  for (i = 0; i < c->owned_metadata_count; i++) {
    grpc_mdelem_unref(c->owned_metadata[i]);
  }
  gpr_free(c->owned_metadata);
  for (i = 0; i < GPR_ARRAY_SIZE(c->buffered_metadata); i++) {
    gpr_free(c->buffered_metadata[i].metadata);
  }
  if (c->legacy_state) {
    destroy_legacy_state(c->legacy_state);
  }
  grpc_bbq_destroy(&c->incoming_queue);
  gpr_free(c);
}
Example #7
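The most recent grpc_call_create shown here: it adds a parent call and a propagation_mask, builds a local grpc_exec_ctx, seeds the metadata-batch deadlines, initializes the call stack with destroy_call as its destructor, optionally binds a completion queue, and, for child calls, propagates the deadline, census contexts, and cancellation before linking the call into the parent's sibling ring under the parent's mutex.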
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
                            uint32_t propagation_mask,
                            grpc_completion_queue *cq,
                            const void *server_transport_data,
                            grpc_mdelem **add_initial_metadata,
                            size_t add_initial_metadata_count,
                            gpr_timespec send_deadline) {
  size_t i, j;
  grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_call *call;
  GPR_TIMER_BEGIN("grpc_call_create", 0);
  call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
  memset(call, 0, sizeof(grpc_call));
  gpr_mu_init(&call->mu);
  call->channel = channel;
  call->cq = cq;
  call->parent = parent_call;
  call->is_client = server_transport_data == NULL;
  if (call->is_client) {
    GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
    for (i = 0; i < add_initial_metadata_count; i++) {
      call->send_extra_metadata[i].md = add_initial_metadata[i];
    }
    call->send_extra_metadata_count = (int)add_initial_metadata_count;
  } else {
    GPR_ASSERT(add_initial_metadata_count == 0);
    call->send_extra_metadata_count = 0;
  }
  for (i = 0; i < 2; i++) {
    for (j = 0; j < 2; j++) {
      call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
    }
  }
  call->send_deadline = send_deadline;
  GRPC_CHANNEL_INTERNAL_REF(channel, "call");
  /* initial refcount dropped by grpc_call_destroy */
  grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
                       call->context, server_transport_data,
                       CALL_STACK_FROM_CALL(call));
  if (cq != NULL) {
    GRPC_CQ_INTERNAL_REF(cq, "bind");
    grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
                                grpc_cq_pollset(cq));
  }
  if (parent_call != NULL) {
    GRPC_CALL_INTERNAL_REF(parent_call, "child");
    GPR_ASSERT(call->is_client);
    GPR_ASSERT(!parent_call->is_client);

    gpr_mu_lock(&parent_call->mu);

    if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
      send_deadline = gpr_time_min(
          gpr_convert_clock_type(send_deadline,
                                 parent_call->send_deadline.clock_type),
          parent_call->send_deadline);
    }
    /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
     * GRPC_PROPAGATE_STATS_CONTEXT */
    /* TODO(ctiller): This should change to use the appropriate census start_op
     * call. */
    if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
      grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
                            parent_call->context[GRPC_CONTEXT_TRACING].value,
                            NULL);
    } else {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
    }
    if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
      call->cancellation_is_inherited = 1;
    }

    if (parent_call->first_child == NULL) {
      parent_call->first_child = call;
      call->sibling_next = call->sibling_prev = call;
    } else {
      call->sibling_next = parent_call->first_child;
      call->sibling_prev = parent_call->first_child->sibling_prev;
      call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
          call;
    }

    gpr_mu_unlock(&parent_call->mu);
  }
  if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
      0) {
    set_deadline_alarm(&exec_ctx, call, send_deadline);
  }
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_TIMER_END("grpc_call_create", 0);
  return call;
}
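For context, a hypothetical client-side caller of the Example #7 signature might look like the sketch below. The helper name start_client_call and the assumption that the metadata array holds exactly two elements (e.g. :path and :authority) are illustrative only; GRPC_PROPAGATE_DEFAULTS is assumed to be the public default propagation bitmask.

/* Hypothetical helper, not part of the gRPC sources: creates a top-level
   client call using the grpc_call_create signature from Example #7. */
static grpc_call *start_client_call(grpc_channel *channel,
                                    grpc_completion_queue *cq,
                                    grpc_mdelem **initial_md /* e.g. :path, :authority */,
                                    gpr_timespec deadline) {
  /* NULL server_transport_data marks the call as a client call;
     with no parent call, the propagation mask is not consulted. */
  return grpc_call_create(channel, NULL /* parent_call */,
                          GRPC_PROPAGATE_DEFAULTS, cq,
                          NULL /* server_transport_data */, initial_md, 2,
                          deadline);
}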
Example #8
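grpc_call_get_call_stack simply exposes the call stack embedded after the grpc_call header via CALL_STACK_FROM_CALL.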
grpc_call_stack *grpc_call_get_call_stack(grpc_call *call) {
  return CALL_STACK_FROM_CALL(call);
}
Example #9
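The non-debug internal unref: it forwards straight to grpc_call_stack_unref, since the call's lifetime is owned by its call stack's refcount.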
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c) {
  grpc_call_stack_unref(exec_ctx, CALL_STACK_FROM_CALL(c));
}
Example #10
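The non-debug internal ref, forwarding to grpc_call_stack_ref.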
void grpc_call_internal_ref(grpc_call *c) {
  grpc_call_stack_ref(CALL_STACK_FROM_CALL(c));
}
Example #11
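The debug-refcount variant of the unref in Example #9, carrying a reason string through to grpc_call_stack_unref for ref-tracing.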
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c,
                              const char *reason) {
  grpc_call_stack_unref(exec_ctx, CALL_STACK_FROM_CALL(c), reason);
}
Example #12
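The debug-refcount variant of the ref in Example #10, carrying a reason string through to grpc_call_stack_ref.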
void grpc_call_internal_ref(grpc_call *c, const char *reason) {
  grpc_call_stack_ref(CALL_STACK_FROM_CALL(c), reason);
}
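Examples #9 through #12 are the two halves of a conditional API: a plain ref/unref pair and a reason-tracking pair. The GRPC_CALL_INTERNAL_REF macro used in Examples #5 and #7 (and its unref counterpart) presumably selects between them at compile time. A sketch, assuming a debug flag named GRPC_CALL_REF_COUNT_DEBUG (the flag name is an assumption):

/* Sketch of the dispatching macros seen in Examples #5 and #7.
   GRPC_CALL_REF_COUNT_DEBUG is an assumed build flag. */
#ifdef GRPC_CALL_REF_COUNT_DEBUG
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call, reason)
#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
  grpc_call_internal_unref(exec_ctx, call, reason)
#else
#define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
  grpc_call_internal_unref(exec_ctx, call)
#endif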