Example #1
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              grpc_call_element_args* args) {
  grpc_deadline_state* deadline_state = elem->call_data;
  memset(deadline_state, 0, sizeof(*deadline_state));
  deadline_state->call_stack = args->call_stack;
  gpr_mu_init(&deadline_state->timer_mu);
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  const gpr_timespec deadline =
      gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set.  However, we can't send down any ops
    // until after the call stack is fully initialized.  If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished.  To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
    state->elem = elem;
    state->deadline = deadline;
    grpc_closure_init(&state->closure, start_timer_after_init, state);
    grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
  }
}
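
Most of the examples on this page follow the same pattern as the snippet above: a deadline arrives on some caller-chosen clock (usually GPR_CLOCK_REALTIME) and is normalized with gpr_convert_clock_type to GPR_CLOCK_MONOTONIC before it is compared against gpr_inf_future or handed to grpc_timer_init. Below is a minimal caller-side sketch of building such a deadline, assuming only the public gpr helpers from <grpc/support/time.h>; the 5-second span and the function name are illustrative, not taken from any example here.

#include <grpc/support/time.h>

/* Build a wall-clock deadline 5 seconds from now, then express the same
 * absolute point in time on the monotonic clock the timer code expects. */
static gpr_timespec example_monotonic_deadline(void) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
  return gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
}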
Example #2
int gpr_cv_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline) {
  int timeout = 0;
  DWORD timeout_max_ms;
  mu->locked = 0;
  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
      0) {
    SleepConditionVariableCS(cv, &mu->cs, INFINITE);
  } else {
    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
    gpr_timespec now = gpr_now(abs_deadline.clock_type);
    int64_t now_ms = (int64_t)now.tv_sec * 1000 + now.tv_nsec / 1000000;
    int64_t deadline_ms =
        (int64_t)abs_deadline.tv_sec * 1000 + abs_deadline.tv_nsec / 1000000;
    if (now_ms >= deadline_ms) {
      timeout = 1;
    } else {
      if ((deadline_ms - now_ms) >= INFINITE) {
        timeout_max_ms = INFINITE - 1;
      } else {
        timeout_max_ms = (DWORD)(deadline_ms - now_ms);
      }
      timeout = (SleepConditionVariableCS(cv, &mu->cs, timeout_max_ms) == 0 &&
                 GetLastError() == ERROR_TIMEOUT);
    }
  }
  mu->locked = 1;
  return timeout;
}
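
This Windows implementation and the POSIX one in Example #10 treat a deadline equal to gpr_inf_future as "wait forever" and otherwise convert it to GPR_CLOCK_REALTIME before calling into the OS primitive. A minimal usage sketch, assuming the synchronization helpers from <grpc/support/sync.h>; the `ready` flag, the 100 ms span, and the function name are illustrative, and mu/cv are assumed to be initialized elsewhere with gpr_mu_init/gpr_cv_init.

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

static gpr_mu mu;  /* assumed initialized with gpr_mu_init(&mu) */
static gpr_cv cv;  /* assumed initialized with gpr_cv_init(&cv) */
static int ready = 0;

/* Wait up to 100 ms for `ready`; gpr_cv_wait returns non-zero on timeout. */
static int wait_for_ready(void) {
  int timed_out = 0;
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
  gpr_mu_lock(&mu);
  while (!ready && !timed_out) {
    timed_out = gpr_cv_wait(&cv, &mu, deadline);
  }
  gpr_mu_unlock(&mu);
  return ready;
}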
Example #3
File: rb_grpc.c, Project: larsonmpdx/grpc
/* Converts a wrapped time constant to a standard time. */
static VALUE grpc_rb_time_val_to_time(VALUE self) {
  gpr_timespec *time_const = NULL;
  gpr_timespec real_time;
  TypedData_Get_Struct(self, gpr_timespec, &grpc_rb_timespec_data_type,
                       time_const);
  real_time = gpr_convert_clock_type(*time_const, GPR_CLOCK_REALTIME);
  return rb_funcall(rb_cTime, id_at, 2, INT2NUM(real_time.tv_sec),
                    INT2NUM(real_time.tv_nsec));
}
Example #4
static void watcher_timer_init(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error_ignored) {
  watcher_timer_init_arg *wa = (watcher_timer_init_arg *)arg;

  grpc_timer_init(exec_ctx, &wa->w->alarm,
                  gpr_convert_clock_type(wa->deadline, GPR_CLOCK_MONOTONIC),
                  &wa->w->on_timeout, gpr_now(GPR_CLOCK_MONOTONIC));
  gpr_free(wa);
}
Example #5
File: rb_server.c, Project: rwightman/grpc
/* call-seq:
   cq = CompletionQueue.new
   tag = Object.new
   timeout = 10
   server.request_call(cqueue, tag, timeout)

   Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self, VALUE cqueue,
        VALUE tag_new, VALUE timeout) {
    grpc_rb_server *s = NULL;
    grpc_call *call = NULL;
    grpc_event ev;
    grpc_call_error err;
    request_call_stack st;
    VALUE result;
    gpr_timespec deadline;
    TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
    if (s->wrapped == NULL) {
        rb_raise(rb_eRuntimeError, "destroyed!");
        return Qnil;
    } else {
        grpc_request_call_stack_init(&st);
        /* call grpc_server_request_call, then wait for it to complete using
         * pluck_event */
        err = grpc_server_request_call(
                  s->wrapped, &call, &st.details, &st.md_ary,
                  grpc_rb_get_wrapped_completion_queue(cqueue),
                  grpc_rb_get_wrapped_completion_queue(cqueue),
                  ROBJECT(tag_new));
        if (err != GRPC_CALL_OK) {
            grpc_request_call_stack_cleanup(&st);
            rb_raise(grpc_rb_eCallError,
                     "grpc_server_request_call failed: %s (code=%d)",
                     grpc_call_error_detail_of(err), err);
            return Qnil;
        }

        ev = grpc_rb_completion_queue_pluck_event(cqueue, tag_new, timeout);
        if (ev.type == GRPC_QUEUE_TIMEOUT) {
            grpc_request_call_stack_cleanup(&st);
            return Qnil;
        }
        if (!ev.success) {
            grpc_request_call_stack_cleanup(&st);
            rb_raise(grpc_rb_eCallError, "request_call completion failed");
            return Qnil;
        }

        /* build the NewServerRpc struct result */
        deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
        result = rb_struct_new(
                     grpc_rb_sNewServerRpc, rb_str_new2(st.details.method),
                     rb_str_new2(st.details.host),
                     rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
                                INT2NUM(deadline.tv_nsec)),
                     grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call), NULL);
        grpc_request_call_stack_cleanup(&st);
        return result;
    }
    return Qnil;
}
Example #6
File: rb_server.c, Project: royalharsh/grpc
/* call-seq:
   server.request_call

   Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self) {
  grpc_rb_server *s = NULL;
  grpc_call *call = NULL;
  grpc_event ev;
  grpc_call_error err;
  request_call_stack st;
  VALUE result;
  void *tag = (void*)&st;
  grpc_completion_queue *call_queue = grpc_completion_queue_create(NULL);
  gpr_timespec deadline;

  TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
  if (s->wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "destroyed!");
    return Qnil;
  }
  grpc_request_call_stack_init(&st);
  /* call grpc_server_request_call, then wait for it to complete using
   * pluck_event */
  err = grpc_server_request_call(
      s->wrapped, &call, &st.details, &st.md_ary,
      call_queue, s->queue, tag);
  if (err != GRPC_CALL_OK) {
    grpc_request_call_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError,
             "grpc_server_request_call failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }

  ev = rb_completion_queue_pluck(s->queue, tag,
                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  if (!ev.success) {
    grpc_request_call_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError, "request_call completion failed");
    return Qnil;
  }



  /* build the NewServerRpc struct result */
  deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
  result = rb_struct_new(
      grpc_rb_sNewServerRpc, grpc_rb_slice_to_ruby_string(st.details.method),
      grpc_rb_slice_to_ruby_string(st.details.host),
      rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
                 INT2NUM(deadline.tv_nsec / 1000)),
      grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),
      NULL);
  grpc_request_call_stack_cleanup(&st);
  return result;
}
Example #7
File: call.c, Project: robottomw/grpc
static void set_deadline_alarm(grpc_exec_ctx *exec_ctx, grpc_call *call,
                               gpr_timespec deadline) {
  if (call->have_alarm) {
    gpr_log(GPR_ERROR, "Attempt to set deadline alarm twice");
    assert(0);
    return;
  }
  GRPC_CALL_INTERNAL_REF(call, "alarm");
  call->have_alarm = 1;
  call->send_deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  grpc_timer_init(exec_ctx, &call->alarm, call->send_deadline, call_alarm, call,
                  gpr_now(GPR_CLOCK_MONOTONIC));
}
Example #8
File: alarm.c, Project: endobson/grpc
void grpc_alarm_set(grpc_alarm *alarm, grpc_completion_queue *cq,
                    gpr_timespec deadline, void *tag, void *reserved) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_CQ_INTERNAL_REF(cq, "alarm");
  alarm->cq = cq;
  alarm->tag = tag;

  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  grpc_timer_init(&exec_ctx, &alarm->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  &alarm->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #9
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                      gpr_timespec deadline, void *reserved) {
  grpc_event ret;
  grpc_pollset_worker worker;
  int first_loop = 1;
  gpr_timespec now;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GPR_ASSERT(!reserved);

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "next");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->completed_tail != &cc->completed_head) {
      grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
      cc->completed_head.next = c->next & ~(gpr_uintptr)1;
      if (c == cc->completed_tail) {
        cc->completed_tail = &cc->completed_head;
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(&exec_ctx, c->done_arg, c);
      break;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    now = gpr_now(GPR_CLOCK_MONOTONIC);
    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    first_loop = 0;
    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
  }
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
  grpc_exec_ctx_finish(&exec_ctx);
  return ret;
}
Example #10
int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
  int err = 0;
  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
      0) {
    err = pthread_cond_wait(cv, mu);
  } else {
    struct timespec abs_deadline_ts;
    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
    abs_deadline_ts.tv_sec = abs_deadline.tv_sec;
    abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
    err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);
  }
  GPR_ASSERT(err == 0 || err == ETIMEDOUT || err == EAGAIN);
  return err == ETIMEDOUT;
}
Example #11
// Starts the deadline timer.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  gpr_timespec deadline) {
  grpc_deadline_state* deadline_state = elem->call_data;
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // Take a reference to the call stack, to be owned by the timer.
    GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
    gpr_mu_lock(&deadline_state->timer_mu);
    deadline_state->timer_pending = true;
    grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
                    elem, gpr_now(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(&deadline_state->timer_mu);
  }
}
Example #12
File: alarm.c, Project: Alex-duzhichao/grpc
grpc_alarm *grpc_alarm_create(grpc_completion_queue *cq, gpr_timespec deadline,
                              void *tag) {
  grpc_alarm *alarm = gpr_malloc(sizeof(grpc_alarm));
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_CQ_INTERNAL_REF(cq, "alarm");
  alarm->cq = cq;
  alarm->tag = tag;

  grpc_cq_begin_op(cq, tag);
  grpc_timer_init(&exec_ctx, &alarm->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  alarm_cb, alarm, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_exec_ctx_finish(&exec_ctx);
  return alarm;
}
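
A sketch of how application code of the same vintage might drive this alarm API end to end, combining grpc_alarm_create above with the completion-queue calls shown in Examples #6 and #9. Signatures changed across gRPC releases, so this assumes the same API generation as the snippet above; the two-second span and the tag value are illustrative, and grpc_init() is assumed to have been called already.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

static void example_alarm_roundtrip(void) {
  grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(2, GPR_TIMESPAN));
  void *tag = (void *)0x1;
  /* The REALTIME deadline is converted to MONOTONIC inside grpc_alarm_create. */
  grpc_alarm *alarm = grpc_alarm_create(cq, deadline, tag);
  /* Blocks until the alarm fires; ev.tag == tag and ev.success != 0. */
  grpc_event ev = grpc_completion_queue_next(
      cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  (void)ev;
  grpc_alarm_destroy(alarm);
  grpc_completion_queue_shutdown(cq);
  /* Drain the queue until GRPC_QUEUE_SHUTDOWN before destroying it. */
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL).type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);
}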
Example #13
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                       gpr_timespec deadline) {
  grpc_event ret;
  grpc_cq_completion *c;
  grpc_cq_completion *prev;

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "pluck");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    prev = &cc->completed_head;
    while ((c = (grpc_cq_completion *)(prev->next & ~(gpr_uintptr)1)) !=
           &cc->completed_head) {
      if (c->tag == tag) {
        prev->next =
            (prev->next & (gpr_uintptr)1) | (c->next & ~(gpr_uintptr)1);
        if (c == cc->completed_tail) {
          cc->completed_tail = prev;
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
        ret.type = GRPC_OP_COMPLETE;
        ret.success = c->next & 1u;
        ret.tag = c->tag;
        c->done(c->done_arg, c);
        goto done;
      }
      prev = c;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
  }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
  return ret;
}
Example #14
File: handshake.c, Project: nerdrew/grpc
void grpc_do_security_handshake(
    grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
    grpc_security_connector *connector, bool is_client_side,
    grpc_endpoint *nonsecure_endpoint, gpr_slice_buffer *read_buffer,
    gpr_timespec deadline, grpc_security_handshake_done_cb cb,
    void *user_data) {
    grpc_security_connector_handshake_list *handshake_node;
    grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
    memset(h, 0, sizeof(grpc_security_handshake));
    h->handshaker = handshaker;
    h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
    h->is_client_side = is_client_side;
    h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
    h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
    h->wrapped_endpoint = nonsecure_endpoint;
    h->user_data = user_data;
    h->cb = cb;
    gpr_ref_init(&h->refs, 2); /* timer and handshake proper each get a ref */
    grpc_closure_init(&h->on_handshake_data_sent_to_peer,
                      on_handshake_data_sent_to_peer, h);
    grpc_closure_init(&h->on_handshake_data_received_from_peer,
                      on_handshake_data_received_from_peer, h);
    gpr_slice_buffer_init(&h->left_overs);
    gpr_slice_buffer_init(&h->outgoing);
    gpr_slice_buffer_init(&h->incoming);
    if (read_buffer != NULL) {
        gpr_slice_buffer_move_into(read_buffer, &h->incoming);
        gpr_free(read_buffer);
    }
    if (!is_client_side) {
        grpc_server_security_connector *server_connector =
            (grpc_server_security_connector *)connector;
        handshake_node = gpr_malloc(sizeof(grpc_security_connector_handshake_list));
        handshake_node->handshake = h;
        gpr_mu_lock(&server_connector->mu);
        handshake_node->next = server_connector->handshaking_handshakes;
        server_connector->handshaking_handshakes = handshake_node;
        gpr_mu_unlock(&server_connector->mu);
    }
    send_handshake_bytes_to_peer(exec_ctx, h);
    grpc_timer_init(exec_ctx, &h->timer,
                    gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                    on_timeout, h, gpr_now(GPR_CLOCK_MONOTONIC));
}
Example #15
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *closure, grpc_endpoint **ep,
                                    grpc_pollset_set *interested_parties,
                                    const grpc_channel_args *channel_args,
                                    const grpc_resolved_address *resolved_addr,
                                    gpr_timespec deadline) {
  grpc_uv_tcp_connect *connect;
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  (void)channel_args;
  (void)interested_parties;

  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_ref_internal(
            channel_args->args[i].value.pointer.p);
      }
    }
  }

  connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
  memset(connect, 0, sizeof(grpc_uv_tcp_connect));
  connect->closure = closure;
  connect->endpoint = ep;
  connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
  connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
  connect->resource_quota = resource_quota;
  uv_tcp_init(uv_default_loop(), connect->tcp_handle);
  connect->connect_req.data = connect;
  // TODO(murgatroid99): figure out what the return value here means
  uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
                 (const struct sockaddr *)resolved_addr->addr,
                 uv_tc_on_connect);
  grpc_closure_init(&connect->on_alarm, uv_tc_on_alarm, connect,
                    grpc_schedule_on_exec_ctx);
  grpc_timer_init(exec_ctx, &connect->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  &connect->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
}
Example #16
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                      gpr_timespec deadline) {
  grpc_event ret;

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "next");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->completed_tail != &cc->completed_head) {
      grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next;
      cc->completed_head.next = c->next & ~(gpr_uintptr)1;
      if (c == cc->completed_tail) {
        cc->completed_tail = &cc->completed_head;
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      ret.type = GRPC_OP_COMPLETE;
      ret.success = c->next & 1u;
      ret.tag = c->tag;
      c->done(c->done_arg, c);
      break;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
  }
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
  return ret;
}
Example #17
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  // Initialize data members.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  calld->path = GRPC_MDSTR_REF(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
  calld->cancel_error = GRPC_ERROR_NONE;
  gpr_atm_rel_store(&calld->subchannel_call, 0);
  gpr_mu_init(&calld->mu);
  calld->connected_subchannel = NULL;
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  calld->owning_call = args->call_stack;
  calld->pollent = NULL;
  // If the resolver has already returned results, then we can access
  // the service config parameters immediately.  Otherwise, we need to
  // defer that work until the resolver returns an initial result.
  // TODO(roth): This code is almost but not quite identical to the code
  // in read_service_config() above.  It would be nice to find a way to
  // combine them, to avoid having to maintain it twice.
  gpr_mu_lock(&chand->mu);
  if (chand->lb_policy != NULL) {
    // We already have a resolver result, so check for service config.
    if (chand->method_params_table != NULL) {
      grpc_mdstr_hash_table *method_params_table =
          grpc_mdstr_hash_table_ref(chand->method_params_table);
      gpr_mu_unlock(&chand->mu);
      method_parameters *method_params =
          grpc_method_config_table_get(method_params_table, args->path);
      if (method_params != NULL) {
        if (gpr_time_cmp(method_params->timeout,
                         gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
          gpr_timespec per_method_deadline =
              gpr_time_add(calld->call_start_time, method_params->timeout);
          calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
        }
        if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
          calld->wait_for_ready_from_service_config =
              method_params->wait_for_ready;
        }
      }
      grpc_mdstr_hash_table_unref(method_params_table);
    } else {
      gpr_mu_unlock(&chand->mu);
    }
  } else {
    // We don't yet have a resolver result, so register a callback to
    // get the service config data once the resolver returns.
    // Take a reference to the call stack to be owned by the callback.
    GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
    grpc_closure_list_append(&chand->waiting_for_config_closures,
                             &calld->read_service_config, GRPC_ERROR_NONE);
    gpr_mu_unlock(&chand->mu);
  }
  // Start the deadline timer with the current deadline value.  If we
  // do not yet have service config data, then the timer may be reset
  // later.
  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
  return GRPC_ERROR_NONE;
}
Example #18
File: call.c, Project: AlexTalks/bazel
grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
                            uint32_t propagation_mask,
                            grpc_completion_queue *cq,
                            const void *server_transport_data,
                            grpc_mdelem **add_initial_metadata,
                            size_t add_initial_metadata_count,
                            gpr_timespec send_deadline) {
  size_t i, j;
  grpc_channel_stack *channel_stack = grpc_channel_get_channel_stack(channel);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_call *call;
  GPR_TIMER_BEGIN("grpc_call_create", 0);
  call = gpr_malloc(sizeof(grpc_call) + channel_stack->call_stack_size);
  memset(call, 0, sizeof(grpc_call));
  gpr_mu_init(&call->mu);
  call->channel = channel;
  call->cq = cq;
  call->parent = parent_call;
  call->is_client = server_transport_data == NULL;
  if (call->is_client) {
    GPR_ASSERT(add_initial_metadata_count < MAX_SEND_EXTRA_METADATA_COUNT);
    for (i = 0; i < add_initial_metadata_count; i++) {
      call->send_extra_metadata[i].md = add_initial_metadata[i];
    }
    call->send_extra_metadata_count = (int)add_initial_metadata_count;
  } else {
    GPR_ASSERT(add_initial_metadata_count == 0);
    call->send_extra_metadata_count = 0;
  }
  for (i = 0; i < 2; i++) {
    for (j = 0; j < 2; j++) {
      call->metadata_batch[i][j].deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
    }
  }
  call->send_deadline = send_deadline;
  GRPC_CHANNEL_INTERNAL_REF(channel, "call");
  /* initial refcount dropped by grpc_call_destroy */
  grpc_call_stack_init(&exec_ctx, channel_stack, 1, destroy_call, call,
                       call->context, server_transport_data,
                       CALL_STACK_FROM_CALL(call));
  if (cq != NULL) {
    GRPC_CQ_INTERNAL_REF(cq, "bind");
    grpc_call_stack_set_pollset(&exec_ctx, CALL_STACK_FROM_CALL(call),
                                grpc_cq_pollset(cq));
  }
  if (parent_call != NULL) {
    GRPC_CALL_INTERNAL_REF(parent_call, "child");
    GPR_ASSERT(call->is_client);
    GPR_ASSERT(!parent_call->is_client);

    gpr_mu_lock(&parent_call->mu);

    if (propagation_mask & GRPC_PROPAGATE_DEADLINE) {
      send_deadline = gpr_time_min(
          gpr_convert_clock_type(send_deadline,
                                 parent_call->send_deadline.clock_type),
          parent_call->send_deadline);
    }
    /* for now GRPC_PROPAGATE_TRACING_CONTEXT *MUST* be passed with
     * GRPC_PROPAGATE_STATS_CONTEXT */
    /* TODO(ctiller): This should change to use the appropriate census start_op
     * call. */
    if (propagation_mask & GRPC_PROPAGATE_CENSUS_TRACING_CONTEXT) {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
      grpc_call_context_set(call, GRPC_CONTEXT_TRACING,
                            parent_call->context[GRPC_CONTEXT_TRACING].value,
                            NULL);
    } else {
      GPR_ASSERT(propagation_mask & GRPC_PROPAGATE_CENSUS_STATS_CONTEXT);
    }
    if (propagation_mask & GRPC_PROPAGATE_CANCELLATION) {
      call->cancellation_is_inherited = 1;
    }

    if (parent_call->first_child == NULL) {
      parent_call->first_child = call;
      call->sibling_next = call->sibling_prev = call;
    } else {
      call->sibling_next = parent_call->first_child;
      call->sibling_prev = parent_call->first_child->sibling_prev;
      call->sibling_next->sibling_prev = call->sibling_prev->sibling_next =
          call;
    }

    gpr_mu_unlock(&parent_call->mu);
  }
  if (gpr_time_cmp(send_deadline, gpr_inf_future(send_deadline.clock_type)) !=
      0) {
    set_deadline_alarm(&exec_ctx, call, send_deadline);
  }
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_TIMER_END("grpc_call_create", 0);
  return call;
}
Example #19
File: utility.c, Project: hongweiwang/grpc
double pygrpc_cast_gpr_timespec_to_double(gpr_timespec timespec) {
  timespec = gpr_convert_clock_type(timespec, GPR_CLOCK_REALTIME);
  return timespec.tv_sec + 1e-9*timespec.tv_nsec;
}
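
The same REALTIME normalization is handy when reporting how much time remains before a deadline, whatever clock it was created on. A minimal sketch using only the gpr helpers already seen on this page; the function name is illustrative.

#include <grpc/support/time.h>

/* Milliseconds remaining until `deadline`, clamped to zero once it has passed. */
static int32_t example_millis_until(gpr_timespec deadline) {
  gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_REALTIME);
  if (gpr_time_cmp(deadline, now) <= 0) return 0;
  return gpr_time_to_millis(gpr_time_sub(deadline, now));
}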
Example #20
GPR_EXPORT gpr_timespec GPR_CALLTYPE gprsharp_convert_clock_type(gpr_timespec t, gpr_clock_type target_clock) {
  return gpr_convert_clock_type(t, target_clock);
}
Example #21
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                       gpr_timespec deadline, void *reserved) {
  grpc_event ret;
  grpc_cq_completion *c;
  grpc_cq_completion *prev;
  grpc_pollset_worker worker;
  gpr_timespec now;
  int first_loop = 1;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GPR_ASSERT(!reserved);

  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF(cc, "pluck");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    prev = &cc->completed_head;
    while ((c = (grpc_cq_completion *)(prev->next & ~(gpr_uintptr)1)) !=
           &cc->completed_head) {
      if (c->tag == tag) {
        prev->next =
            (prev->next & (gpr_uintptr)1) | (c->next & ~(gpr_uintptr)1);
        if (c == cc->completed_tail) {
          cc->completed_tail = prev;
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
        ret.type = GRPC_OP_COMPLETE;
        ret.success = c->next & 1u;
        ret.tag = c->tag;
        c->done(&exec_ctx, c->done_arg, c);
        goto done;
      }
      prev = c;
    }
    if (cc->shutdown) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_SHUTDOWN;
      break;
    }
    if (!add_plucker(cc, tag, &worker)) {
      gpr_log(GPR_DEBUG,
              "Too many outstanding grpc_completion_queue_pluck calls: maximum "
              "is %d",
              GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      /* TODO(ctiller): should we use a different result here */
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    now = gpr_now(GPR_CLOCK_MONOTONIC);
    if (!first_loop && gpr_time_cmp(now, deadline) >= 0) {
      del_plucker(cc, tag, &worker);
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      break;
    }
    first_loop = 0;
    grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline);
    del_plucker(cc, tag, &worker);
  }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
  grpc_exec_ctx_finish(&exec_ctx);
  return ret;
}
Example #22
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *closure, grpc_endpoint **ep,
                                    grpc_pollset_set *interested_parties,
                                    const grpc_channel_args *channel_args,
                                    const grpc_resolved_address *addr,
                                    gpr_timespec deadline) {
  int fd;
  grpc_dualstack_mode dsmode;
  int err;
  async_connect *ac;
  grpc_resolved_address addr6_v4mapped;
  grpc_resolved_address addr4_copy;
  grpc_fd *fdobj;
  char *name;
  char *addr_str;
  grpc_error *error;

  *ep = NULL;

  /* Use dualstack sockets where available. */
  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
    addr = &addr6_v4mapped;
  }

  error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
  if (error != GRPC_ERROR_NONE) {
    grpc_closure_sched(exec_ctx, closure, error);
    return;
  }
  if (dsmode == GRPC_DSMODE_IPV4) {
    /* If we got an AF_INET socket, map the address back to IPv4. */
    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
    addr = &addr4_copy;
  }
  if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
    grpc_closure_sched(exec_ctx, closure, error);
    return;
  }

  do {
    GPR_ASSERT(addr->len < ~(socklen_t)0);
    err =
        connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
  } while (err < 0 && errno == EINTR);

  addr_str = grpc_sockaddr_to_uri(addr);
  gpr_asprintf(&name, "tcp-client:%s", addr_str);

  fdobj = grpc_fd_create(fd, name);

  if (err >= 0) {
    *ep =
        grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
    goto done;
  }

  if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
    grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
    grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
    goto done;
  }

  grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);

  ac = gpr_malloc(sizeof(async_connect));
  ac->closure = closure;
  ac->ep = ep;
  ac->fd = fdobj;
  ac->interested_parties = interested_parties;
  ac->addr_str = addr_str;
  addr_str = NULL;
  gpr_mu_init(&ac->mu);
  ac->refs = 2;
  grpc_closure_init(&ac->write_closure, on_writable, ac,
                    grpc_schedule_on_exec_ctx);
  ac->channel_args = grpc_channel_args_copy(channel_args);

  if (grpc_tcp_trace) {
    gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
            ac->addr_str);
  }

  gpr_mu_lock(&ac->mu);
  grpc_closure_init(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
  grpc_timer_init(exec_ctx, &ac->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  &ac->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
  gpr_mu_unlock(&ac->mu);

done:
  gpr_free(name);
  gpr_free(addr_str);
}
Example #23
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                             grpc_endpoint **ep,
                             grpc_pollset_set *interested_parties,
                             const struct sockaddr *addr, size_t addr_len,
                             gpr_timespec deadline) {
  int fd;
  grpc_dualstack_mode dsmode;
  int err;
  async_connect *ac;
  struct sockaddr_in6 addr6_v4mapped;
  struct sockaddr_in addr4_copy;
  grpc_fd *fdobj;
  char *name;
  char *addr_str;

  *ep = NULL;

  /* Use dualstack sockets where available. */
  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
    addr = (const struct sockaddr *)&addr6_v4mapped;
    addr_len = sizeof(addr6_v4mapped);
  }

  fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
  }
  if (dsmode == GRPC_DSMODE_IPV4) {
    /* If we got an AF_INET socket, map the address back to IPv4. */
    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
    addr = (struct sockaddr *)&addr4_copy;
    addr_len = sizeof(addr4_copy);
  }
  if (!prepare_socket(addr, fd)) {
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    return;
  }

  do {
    GPR_ASSERT(addr_len < ~(socklen_t)0);
    err = connect(fd, addr, (socklen_t)addr_len);
  } while (err < 0 && errno == EINTR);

  addr_str = grpc_sockaddr_to_uri(addr);
  gpr_asprintf(&name, "tcp-client:%s", addr_str);

  fdobj = grpc_fd_create(fd, name);

  if (err >= 0) {
    *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
    grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
    goto done;
  }

  if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
    gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
    grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    goto done;
  }

  grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);

  ac = gpr_malloc(sizeof(async_connect));
  ac->closure = closure;
  ac->ep = ep;
  ac->fd = fdobj;
  ac->interested_parties = interested_parties;
  ac->addr_str = addr_str;
  addr_str = NULL;
  gpr_mu_init(&ac->mu);
  ac->refs = 2;
  ac->write_closure.cb = on_writable;
  ac->write_closure.cb_arg = ac;

  if (grpc_tcp_trace) {
    gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
            ac->addr_str);
  }

  gpr_mu_lock(&ac->mu);
  grpc_alarm_init(exec_ctx, &ac->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
  gpr_mu_unlock(&ac->mu);

done:
  gpr_free(name);
  gpr_free(addr_str);
}