Example #1
/* call-seq:
   ops = {
     GRPC::Core::CallOps::SEND_INITIAL_METADATA => <op_value>,
     GRPC::Core::CallOps::SEND_MESSAGE => <op_value>,
     ...
   }
   call.run_batch(ops)

   Starts a batch of operations defined in the hash ops; when complete, posts
   a completion event to the completion queue bound to the call, and blocks
   until the batch finishes.
   The order of ops specified in the batch has no significance.
   Only one operation of each type can be active at once in any given
   batch */
static VALUE grpc_rb_call_run_batch(VALUE self, VALUE ops_hash) {
  run_batch_stack *st = NULL;
  grpc_rb_call *call = NULL;
  grpc_event ev;
  grpc_call_error err;
  VALUE result = Qnil;
  VALUE rb_write_flag = rb_ivar_get(self, id_write_flag);
  unsigned write_flag = 0;
  void *tag = (void *)&st;

  if (RTYPEDDATA_DATA(self) == NULL) {
    rb_raise(grpc_rb_eCallError, "Cannot run batch on closed call");
    return Qnil;
  }
  TypedData_Get_Struct(self, grpc_rb_call, &grpc_call_data_type, call);

  /* Validate the ops args, adding them to a ruby array */
  if (TYPE(ops_hash) != T_HASH) {
    rb_raise(rb_eTypeError, "call#run_batch: ops hash should be a hash");
    return Qnil;
  }
  if (rb_write_flag != Qnil) {
    write_flag = NUM2UINT(rb_write_flag);
  }
  st = gpr_malloc(sizeof(run_batch_stack));
  grpc_run_batch_stack_init(st, write_flag);
  grpc_run_batch_stack_fill_ops(st, ops_hash);

  /* call grpc_call_start_batch, then wait for it to complete using
   * pluck_event */
  err = grpc_call_start_batch(call->wrapped, st->ops, st->op_num, tag, NULL);
  if (err != GRPC_CALL_OK) {
    grpc_run_batch_stack_cleanup(st);
    gpr_free(st);
    rb_raise(grpc_rb_eCallError,
             "grpc_call_start_batch failed with %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }
  ev = rb_completion_queue_pluck(call->queue, tag,
                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  if (!ev.success) {
    grpc_run_batch_stack_cleanup(st);
    gpr_free(st);
    rb_raise(grpc_rb_eCallError, "call#run_batch: batch completion failed");
  }
  /* Build and return the BatchResult struct result,
     if there is an error, it's reflected in the status */
  result = grpc_run_batch_stack_build_result(st);
  grpc_run_batch_stack_cleanup(st);
  gpr_free(st);
  return result;
}
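Note that the tag passed to grpc_call_start_batch only needs to be a unique
address (above, the address of the local st pointer); the same address is then
handed to the pluck call to wait for exactly that batch. A minimal sketch of
that contract, assuming cq, call, ops and nops already exist:

  int tag; /* any unique address will do */
  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_call_start_batch(call, ops, nops, (void *)&tag, NULL));
  grpc_event ev = grpc_completion_queue_pluck(
      cq, (void *)&tag, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE && ev.success);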
Example #2
void test_succeeds(void) {
  grpc_resolved_address resolved_addr;
  struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr;
  uv_tcp_t *svr_handle = gpr_malloc(sizeof(uv_tcp_t));
  int connections_complete_before;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_succeeds");

  memset(&resolved_addr, 0, sizeof(resolved_addr));
  resolved_addr.len = sizeof(struct sockaddr_in);
  addr->sin_family = AF_INET;

  /* create a dummy server */
  GPR_ASSERT(0 == uv_tcp_init(uv_default_loop(), svr_handle));
  GPR_ASSERT(0 == uv_tcp_bind(svr_handle, (struct sockaddr *)addr, 0));
  GPR_ASSERT(0 == uv_listen((uv_stream_t *)svr_handle, 1, connection_cb));

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  /* connect to it */
  GPR_ASSERT(uv_tcp_getsockname(svr_handle, (struct sockaddr *)addr,
                                (int *)&resolved_addr.len) == 0);
  GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
                          &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));

  gpr_mu_lock(g_mu);

  /* Drive the pollset until the connection callback runs; the lock is
     dropped around each exec_ctx flush so queued closures can acquire it. */
  while (g_connections_complete == connections_complete_before) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          grpc_timeout_seconds_to_deadline(5))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }

  // This will get cleaned up when the pollset runs again or gets shutdown
  uv_close((uv_handle_t *)svr_handle, close_cb);

  gpr_mu_unlock(g_mu);

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #3
int main(int argc, char **argv) {
  synchronizer sync;
  grpc_jwt_verifier *verifier;
  gpr_cmdline *cl;
  char *jwt = NULL;
  char *aud = NULL;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  grpc_init();
  cl = gpr_cmdline_create("JWT verifier tool");
  gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt);
  gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud);
  gpr_cmdline_parse(cl, argc, argv);
  if (jwt == NULL || aud == NULL) {
    print_usage_and_exit(cl, argv[0]);
  }

  verifier = grpc_jwt_verifier_create(NULL, 0);

  sync.pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(sync.pollset, &sync.mu);
  sync.is_done = 0;

  grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud,
                           on_jwt_verification_done, &sync);

  gpr_mu_lock(sync.mu);
  while (!sync.is_done) {
    grpc_pollset_worker *worker = NULL;
    if (!GRPC_LOG_IF_ERROR(
            "pollset_work",
            grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
                              gpr_now(GPR_CLOCK_MONOTONIC),
                              gpr_inf_future(GPR_CLOCK_MONOTONIC))))
      sync.is_done = true;
    gpr_mu_unlock(sync.mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(sync.mu);
  }
  gpr_mu_unlock(sync.mu);

  gpr_free(sync.pollset);

  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_cmdline_destroy(cl);
  grpc_shutdown();
  return !sync.success;
}
Example #4
/* call-seq:
   server.request_call

   Requests notification of a new call on a server. */
static VALUE grpc_rb_server_request_call(VALUE self) {
  grpc_rb_server* s = NULL;
  grpc_call* call = NULL;
  grpc_event ev;
  grpc_call_error err;
  request_call_stack st;
  VALUE result;
  void* tag = (void*)&st;
  grpc_completion_queue* call_queue =
      grpc_completion_queue_create_for_pluck(NULL);
  gpr_timespec deadline;

  TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
  if (s->wrapped == NULL) {
    rb_raise(rb_eRuntimeError, "destroyed!");
    return Qnil;
  }
  grpc_request_call_stack_init(&st);
  /* call grpc_server_request_call, then wait for it to complete using
   * pluck_event */
  err = grpc_server_request_call(s->wrapped, &call, &st.details, &st.md_ary,
                                 call_queue, s->queue, tag);
  if (err != GRPC_CALL_OK) {
    grpc_request_call_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError,
             "grpc_server_request_call failed: %s (code=%d)",
             grpc_call_error_detail_of(err), err);
    return Qnil;
  }

  ev = rb_completion_queue_pluck(s->queue, tag,
                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  if (!ev.success) {
    grpc_request_call_stack_cleanup(&st);
    rb_raise(grpc_rb_eCallError, "request_call completion failed");
    return Qnil;
  }

  /* build the NewServerRpc struct result */
  deadline = gpr_convert_clock_type(st.details.deadline, GPR_CLOCK_REALTIME);
  result = rb_struct_new(
      grpc_rb_sNewServerRpc, grpc_rb_slice_to_ruby_string(st.details.method),
      grpc_rb_slice_to_ruby_string(st.details.host),
      rb_funcall(rb_cTime, id_at, 2, INT2NUM(deadline.tv_sec),
                 INT2NUM(deadline.tv_nsec / 1000)),
      grpc_rb_md_ary_to_h(&st.md_ary), grpc_rb_wrap_call(call, call_queue),
      NULL);
  grpc_request_call_stack_cleanup(&st);
  return result;
}
Example #5
File: server.c Project: An-mol/grpc
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                           grpc_call_element_args *args) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  memset(calld, 0, sizeof(call_data));
  calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
  calld->call = grpc_call_from_top_element(elem);
  gpr_mu_init(&calld->mu_state);

  grpc_closure_init(&calld->server_on_recv_initial_metadata,
                    server_on_recv_initial_metadata, elem);

  server_ref(chand->server);
}
Example #6
void test_fails(void) {
  grpc_resolved_address resolved_addr;
  struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr;
  int connections_complete_before;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_fails");

  memset(&resolved_addr, 0, sizeof(resolved_addr));
  resolved_addr.len = sizeof(struct sockaddr_in);
  addr->sin_family = AF_INET;

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  /* connect to a broken address */
  GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
                          &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));

  gpr_mu_lock(g_mu);

  /* wait for the connection callback to finish */
  while (g_connections_complete == connections_complete_before) {
    grpc_pollset_worker *worker = NULL;
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec polling_deadline = test_deadline();
    switch (grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
      case GRPC_TIMERS_FIRED:
        break;
      case GRPC_TIMERS_NOT_CHECKED:
        polling_deadline = now;
      /* fall through */
      case GRPC_TIMERS_CHECKED_AND_EMPTY:
        GPR_ASSERT(GRPC_LOG_IF_ERROR(
            "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                                              now, polling_deadline)));
        break;
    }
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }

  gpr_mu_unlock(g_mu);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #7
int main(int argc, char **argv) {
  int result = 0;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  synchronizer sync;
  grpc_channel_credentials *creds = NULL;
  char *service_url = "https://test.foo.google.com/Foo";
  grpc_auth_metadata_context context;
  gpr_cmdline *cl = gpr_cmdline_create("print_google_default_creds_token");
  gpr_cmdline_add_string(cl, "service_url",
                         "Service URL for the token request.", &service_url);
  gpr_cmdline_parse(cl, argc, argv);
  memset(&context, 0, sizeof(context));
  context.service_url = service_url;

  grpc_init();

  creds = grpc_google_default_credentials_create();
  if (creds == NULL) {
    fprintf(stderr, "\nCould not find default credentials.\n\n");
    result = 1;
    goto end;
  }

  grpc_pollset_init(&sync.pollset);
  sync.is_done = 0;

  grpc_call_credentials_get_request_metadata(
      &exec_ctx, ((grpc_composite_channel_credentials *)creds)->call_creds,
      &sync.pollset, context, on_metadata_response, &sync);

  gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
  while (!sync.is_done) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &sync.pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));

  grpc_channel_credentials_release(creds);
  grpc_exec_ctx_finish(&exec_ctx);

end:
  gpr_cmdline_destroy(cl);
  grpc_shutdown();
  return result;
}
Example #8
/* Increment counter only when (m->counter%m->threads)==m->thread_id; then mark
   thread as done.  */
static void inc_by_turns(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  int id = thread_id(m);
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    while ((m->counter % m->threads) != id) {
      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    m->counter++;
    gpr_cv_broadcast(&m->cv);
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
Example #9
static void fill_metadata(grpc_call_element *elem, grpc_metadata_batch *mdb) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  char tmp[GPR_LTOA_MIN_BUFSIZE];
  gpr_ltoa(chand->error_code, tmp);
  calld->status.md = grpc_mdelem_from_strings("grpc-status", tmp);
  calld->details.md =
      grpc_mdelem_from_strings("grpc-message", chand->error_message);
  calld->status.prev = calld->details.next = NULL;
  calld->status.next = &calld->details;
  calld->details.prev = &calld->status;
  mdb->list.head = &calld->status;
  mdb->list.tail = &calld->details;
  mdb->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
}
Example #10
// Starts the deadline timer.
static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  gpr_timespec deadline) {
  grpc_deadline_state* deadline_state = elem->call_data;
  deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // Take a reference to the call stack, to be owned by the timer.
    GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
    gpr_mu_lock(&deadline_state->timer_mu);
    deadline_state->timer_pending = true;
    grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
                    elem, gpr_now(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(&deadline_state->timer_mu);
  }
}
Example #11
static void test_threading_loop(void *arg) {
  threading_shared *shared = arg;
  while (thread_wakeups < 1000000) {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_pollset_worker *worker;
    gpr_mu_lock(shared->mu);
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, shared->pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_inf_future(GPR_CLOCK_MONOTONIC))));
    gpr_mu_unlock(shared->mu);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #12
int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) {
  int err = 0;
  if (gpr_time_cmp(abs_deadline, gpr_inf_future(abs_deadline.clock_type)) ==
      0) {
    err = pthread_cond_wait(cv, mu);
  } else {
    struct timespec abs_deadline_ts;
    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
    abs_deadline_ts.tv_sec = abs_deadline.tv_sec;
    abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
    err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);
  }
  GPR_ASSERT(err == 0 || err == ETIMEDOUT || err == EAGAIN);
  return err == ETIMEDOUT;
}
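A typical caller wraps this in a predicate loop: a nonzero return means the
absolute deadline passed before the condition variable was signalled. A hedged
sketch, assuming a gpr_mu mu, a gpr_cv cv, and an int ready flag that another
thread sets before calling gpr_cv_signal:

  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(5, GPR_TIMESPAN));
  gpr_mu_lock(&mu);
  while (!ready) {
    if (gpr_cv_wait(&cv, &mu, deadline)) break; /* deadline expired */
  }
  gpr_mu_unlock(&mu);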
Example #13
/* Wait until m->event is set to (void *)1, then decrement m->refcount
   m->iterations * m->threads times, checking that only the last decrement
   takes the count to zero; then mark the thread as done.  */
static void refcheck(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 n = m->iterations * m->threads;
  gpr_int64 i;
  GPR_ASSERT(gpr_event_wait(&m->event, gpr_inf_future(GPR_CLOCK_REALTIME)) ==
             (void *)1);
  GPR_ASSERT(gpr_event_get(&m->event) == (void *)1);
  for (i = 1; i != n; i++) {
    GPR_ASSERT(!gpr_unref(&m->refcount));
    m->counter++;
  }
  GPR_ASSERT(gpr_unref(&m->refcount));
  m->counter++;
  mark_thread_done(m);
}
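The assertions above rely on gpr_unref returning nonzero only on the decrement
that takes the count to zero. An illustrative stand-alone check of that
contract:

  gpr_refcount r;
  gpr_ref_init(&r, 2);
  GPR_ASSERT(!gpr_unref(&r)); /* 2 -> 1: not the last reference */
  GPR_ASSERT(gpr_unref(&r));  /* 1 -> 0: the last reference */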
Example #14
/* Wait for the signal to shutdown a client. */
static void client_wait_and_shutdown(client *cl) {
  gpr_mu_lock(g_mu);
  while (!cl->done) {
    grpc_pollset_worker *worker = NULL;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_inf_future(GPR_CLOCK_MONOTONIC))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);
}
Example #15
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                           const void *server_transport_data,
                           grpc_transport_stream_op *initial_op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  memset(calld, 0, sizeof(call_data));
  calld->deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
  calld->call = grpc_call_from_top_element(elem);
  gpr_mu_init(&calld->mu_state);

  grpc_closure_init(&calld->server_on_recv, server_on_recv, elem);

  server_ref(chand->server);

  if (initial_op) server_mutate_op(elem, initial_op);
}
Example #16
static void *grpc_rb_wait_for_event_no_gil(void *param) {
  grpc_rb_event *event = NULL;
  (void)param;
  gpr_mu_lock(&event_queue.mu);
  while ((event = grpc_rb_event_queue_dequeue()) == NULL) {
    gpr_cv_wait(&event_queue.cv,
                &event_queue.mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
    if (event_queue.abort) {
      gpr_mu_unlock(&event_queue.mu);
      return NULL;
    }
  }
  gpr_mu_unlock(&event_queue.mu);
  return event;
}
Example #17
static void destroy_server(grpc_rb_server *server, gpr_timespec deadline) {
  grpc_event ev;
  if (server->wrapped != NULL) {
    grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL);
    ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL);
    if (ev.type == GRPC_QUEUE_TIMEOUT) {
      grpc_server_cancel_all_calls(server->wrapped);
      rb_completion_queue_pluck(server->queue, NULL,
                                gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    }
    grpc_server_destroy(server->wrapped);
    grpc_rb_completion_queue_destroy(server->queue);
    server->wrapped = NULL;
    server->queue = NULL;
  }
}
Example #18
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                         gpr_timespec now) {
  gpr_timespec timeout;
  static const int64_t max_spin_polling_us = 10;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }
  if (gpr_time_cmp(deadline,
                   gpr_time_add(now, gpr_time_from_micros(
                                         max_spin_polling_us,
                                         GPR_TIMESPAN))) <= 0) {
    return 0;
  }
  timeout = gpr_time_sub(deadline, now);
  return gpr_time_to_millis(gpr_time_add(
      timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
}
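The three return values map directly onto poll(2)'s timeout argument: -1
blocks indefinitely, 0 returns immediately (the spin case for deadlines within
max_spin_polling_us), and a positive value waits that many milliseconds,
rounded up so the deadline is never undershot. A hedged sketch of a caller,
assuming pfds and nfds already describe the descriptors to watch:

  int timeout_ms = grpc_poll_deadline_to_millis_timeout(
      deadline, gpr_now(deadline.clock_type));
  int rv = poll(pfds, nfds, timeout_ms); /* poll(2), from <poll.h> */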
Example #19
// Note this loop breaks out with a single call of
// "grpc_rb_event_unblocking_func".
// This assumes that a Ruby call to the unblocking func
// indicates process shutdown.
// In the worst case, this stops polling channel connectivity
// early and falls back to the current behavior.
static void *run_poll_channels_loop_no_gil(void *arg) {
  grpc_event event;
  (void)arg;
  for (;;) {
    event = grpc_completion_queue_next(
        channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (event.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    if (event.type == GRPC_OP_COMPLETE) {
      grpc_rb_channel_try_register_connection_polling(
          (grpc_rb_channel *)event.tag);
    }
  }
  grpc_completion_queue_destroy(channel_polling_cq);
  gpr_log(GPR_DEBUG,
          "GRPC_RUBY: run_poll_channels_loop_no_gil - exit connection polling "
          "loop");
  return NULL;
}
Example #20
static int is_stack_running_on_compute_engine(void) {
  compute_engine_detector detector;
  grpc_httpcli_request request;
  grpc_httpcli_context context;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure destroy_closure;

  /* The http call is local. If it takes more than one sec, it is for sure not
     on compute engine. */
  gpr_timespec max_detection_delay = gpr_time_from_seconds(1, GPR_TIMESPAN);

  grpc_pollset_init(&detector.pollset);
  detector.is_done = 0;
  detector.success = 0;

  memset(&request, 0, sizeof(grpc_httpcli_request));
  request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
  request.path = "/";

  grpc_httpcli_context_init(&context);

  grpc_httpcli_get(
      &exec_ctx, &context, &detector.pollset, &request,
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
      on_compute_engine_detection_http_response, &detector);

  grpc_exec_ctx_finish(&exec_ctx);

  /* Block until we get the response. This is not ideal but this should only be
     called once for the lifetime of the process by the default credentials. */
  gpr_mu_lock(GRPC_POLLSET_MU(&detector.pollset));
  while (!detector.is_done) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &detector.pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&detector.pollset));

  grpc_httpcli_context_destroy(&context);
  grpc_closure_init(&destroy_closure, destroy_pollset, &detector.pollset);
  grpc_pollset_shutdown(&exec_ctx, &detector.pollset, &destroy_closure);
  grpc_exec_ctx_finish(&exec_ctx);

  return detector.success;
}
Example #21
char *grpc_test_fetch_oauth2_token_with_credentials(grpc_credentials *creds) {
  oauth2_request request;
  grpc_pollset_init(&request.pollset);
  request.is_done = 0;

  grpc_credentials_get_request_metadata(creds, &request.pollset, "",
                                        on_oauth2_response, &request);

  gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
  while (!request.is_done)
    grpc_pollset_work(&request.pollset, gpr_inf_future(GPR_CLOCK_REALTIME));
  gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));

  grpc_pollset_shutdown(&request.pollset, do_nothing, NULL);
  grpc_pollset_destroy(&request.pollset);
  return request.token;
}
Example #22
static void test_too_many_plucks(void) {
  grpc_event ev;
  grpc_completion_queue *cc;
  void *tags[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
  grpc_cq_completion completions[GPR_ARRAY_SIZE(tags)];
  gpr_thd_id thread_ids[GPR_ARRAY_SIZE(tags)];
  struct thread_state thread_states[GPR_ARRAY_SIZE(tags)];
  gpr_thd_options thread_options = gpr_thd_options_default();
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  unsigned i, j;

  LOG_TEST("test_too_many_plucks");

  cc = grpc_completion_queue_create(NULL);
  gpr_thd_options_set_joinable(&thread_options);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    tags[i] = create_test_tag();
    for (j = 0; j < i; j++) {
      GPR_ASSERT(tags[i] != tags[j]);
    }
    thread_states[i].cc = cc;
    thread_states[i].tag = tags[i];
    gpr_thd_new(thread_ids + i, pluck_one, thread_states + i, &thread_options);
  }

  /* wait until all other threads are plucking */
  gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(1000));

  /* All plucker slots are taken, so this extra pluck fails immediately with a
     GRPC_QUEUE_TIMEOUT event even though its deadline is infinite. */
  ev = grpc_completion_queue_pluck(cc, create_test_tag(),
                                   gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    grpc_cq_begin_op(cc, tags[i]);
    grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
                   do_nothing_end_completion, NULL, &completions[i]);
  }

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    gpr_thd_join(thread_ids[i]);
  }

  shutdown_and_destroy(cc);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #23
/* Consume elements from m->q until m->threads*m->iterations are seen,
   wait an extra second to confirm that no more elements are arriving,
   then mark thread as done. */
static void consumer(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 n = m->iterations * m->threads;
  gpr_int64 i;
  int value;
  for (i = 0; i != n; i++) {
    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_lock(&m->mu);
  m->counter = n;
  gpr_mu_unlock(&m->mu);
  GPR_ASSERT(
      !queue_remove(&m->q, &value,
                    gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN))));
  mark_thread_done(m);
}
Example #24
gpr_timespec gpr_time_from_nanos(long ns, gpr_clock_type type) {
  gpr_timespec result;
  result.clock_type = type;
  if (ns == LONG_MAX) {
    result = gpr_inf_future(type);
  } else if (ns == LONG_MIN) {
    result = gpr_inf_past(type);
  } else if (ns >= 0) {
    result.tv_sec = ns / GPR_NS_PER_SEC;
    result.tv_nsec = (gpr_int32)(ns - result.tv_sec * GPR_NS_PER_SEC);
  } else {
    /* Calculation carefully formulated to avoid any possible under/overflow. */
    result.tv_sec = (-(999999999 - (ns + GPR_NS_PER_SEC)) / GPR_NS_PER_SEC) - 1;
    result.tv_nsec = (gpr_int32)(ns - result.tv_sec * GPR_NS_PER_SEC);
  }
  return result;
}
Example #25
gpr_timespec gpr_time_from_millis(long ms, gpr_clock_type type) {
  gpr_timespec result;
  result.clock_type = type;
  if (ms == LONG_MAX) {
    result = gpr_inf_future(type);
  } else if (ms == LONG_MIN) {
    result = gpr_inf_past(type);
  } else if (ms >= 0) {
    result.tv_sec = ms / 1000;
    result.tv_nsec = (gpr_int32)((ms - result.tv_sec * 1000) * 1000000);
  } else {
    /* Calculation carefully formulated to avoid any possible under/overflow. */
    result.tv_sec = (-(999 - (ms + 1000)) / 1000) - 1;
    result.tv_nsec = (gpr_int32)((ms - result.tv_sec * 1000) * 1000000);
  }
  return result;
}
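The negative branches are subtle because C integer division truncates toward
zero, so the formula biases tv_sec downward by one to keep tv_nsec within
[0, 10^9). A self-contained re-derivation (illustrative, not part of the
source) tracing ms = -1500 through the millisecond variant; the nanosecond and
microsecond variants follow the same reasoning:

  #include <assert.h>

  int main(void) {
    long ms = -1500;
    long tv_sec = (-(999 - (ms + 1000)) / 1000) - 1; /* -1499/1000 == -1 -> -2 */
    long tv_nsec = (ms - tv_sec * 1000) * 1000000;   /* (-1500 + 2000) * 1e6 */
    assert(tv_sec == -2 && tv_nsec == 500000000);    /* -2s + 0.5s == -1.5s */
    return 0;
  }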
Example #26
gpr_timespec gpr_time_from_micros(int64_t us, gpr_clock_type type) {
  gpr_timespec result;
  result.clock_type = type;
  if (us == INT64_MAX) {
    result = gpr_inf_future(type);
  } else if (us == INT64_MIN) {
    result = gpr_inf_past(type);
  } else if (us >= 0) {
    result.tv_sec = us / 1000000;
    result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
  } else {
    /* Calculation carefully formulated to avoid any possible under/overflow. */
    result.tv_sec = (-(999999 - (us + 1000000)) / 1000000) - 1;
    result.tv_nsec = (int32_t)((us - result.tv_sec * 1000000) * 1000);
  }
  return result;
}
Example #27
static void on_initial_header(void *tp, grpc_mdelem *md) {
  grpc_chttp2_transport_parsing *transport_parsing = tp;
  grpc_chttp2_stream_parsing *stream_parsing =
      transport_parsing->incoming_stream;

  GPR_TIMER_BEGIN("on_initial_header", 0);

  GPR_ASSERT(stream_parsing);

  GRPC_CHTTP2_IF_TRACING(gpr_log(
      GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", stream_parsing->id,
      transport_parsing->is_client ? "CLI" : "SVR",
      grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));

  if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
    /* TODO(ctiller): check for a status like " 0" */
    stream_parsing->seen_error = 1;
  }

  if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
    gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
    if (!cached_timeout) {
      /* not already parsed: parse it now, and store the result away */
      cached_timeout = gpr_malloc(sizeof(gpr_timespec));
      if (!grpc_chttp2_decode_timeout(grpc_mdstr_as_c_string(md->value),
                                      cached_timeout)) {
        gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
                grpc_mdstr_as_c_string(md->value));
        *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
      }
      grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
    }
    grpc_chttp2_incoming_metadata_buffer_set_deadline(
        &stream_parsing->metadata_buffer[0],
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
    GRPC_MDELEM_UNREF(md);
  } else {
    grpc_chttp2_incoming_metadata_buffer_add(
        &stream_parsing->metadata_buffer[0], md);
  }

  grpc_chttp2_list_add_parsing_seen_stream(transport_parsing, stream_parsing);

  GPR_TIMER_END("on_initial_header", 0);
}
Example #28
/*
  call-seq:
    server = Server.new({'arg1': 'value1'})
    ... // do stuff with server
    ...
    ... // to shutdown the server
    server.destroy()

    ... // to shutdown the server with a timeout
    server.destroy(timeout)

  Destroys server instances. */
static VALUE grpc_rb_server_destroy(int argc, VALUE *argv, VALUE self) {
  VALUE timeout = Qnil;
  gpr_timespec deadline;
  grpc_rb_server *s = NULL;

  /* "01" == 0 mandatory args, 1 (timeout) is optional */
  rb_scan_args(argc, argv, "01", &timeout);
  TypedData_Get_Struct(self, grpc_rb_server, &grpc_rb_server_data_type, s);
  if (TYPE(timeout) == T_NIL) {
    deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
  } else {
    deadline = grpc_rb_time_timeval(timeout, /* absolute time*/ 0);
  }

  destroy_server(s, deadline);

  return Qnil;
}
Example #29
/* Takes ownership of json and buffer even in case of failure. */
grpc_jwt_claims *grpc_jwt_claims_from_json(grpc_json *json, grpc_slice buffer) {
  grpc_json *cur;
  grpc_jwt_claims *claims = gpr_malloc(sizeof(grpc_jwt_claims));
  memset(claims, 0, sizeof(grpc_jwt_claims));
  claims->json = json;
  claims->buffer = buffer;
  claims->iat = gpr_inf_past(GPR_CLOCK_REALTIME);
  claims->nbf = gpr_inf_past(GPR_CLOCK_REALTIME);
  claims->exp = gpr_inf_future(GPR_CLOCK_REALTIME);

  /* Per the spec, all fields are optional. */
  for (cur = json->child; cur != NULL; cur = cur->next) {
    if (strcmp(cur->key, "sub") == 0) {
      claims->sub = validate_string_field(cur, "sub");
      if (claims->sub == NULL) goto error;
    } else if (strcmp(cur->key, "iss") == 0) {
      claims->iss = validate_string_field(cur, "iss");
      if (claims->iss == NULL) goto error;
    } else if (strcmp(cur->key, "aud") == 0) {
      claims->aud = validate_string_field(cur, "aud");
      if (claims->aud == NULL) goto error;
    } else if (strcmp(cur->key, "jti") == 0) {
      claims->jti = validate_string_field(cur, "jti");
      if (claims->jti == NULL) goto error;
    } else if (strcmp(cur->key, "iat") == 0) {
      claims->iat = validate_time_field(cur, "iat");
      if (gpr_time_cmp(claims->iat, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
        goto error;
    } else if (strcmp(cur->key, "exp") == 0) {
      claims->exp = validate_time_field(cur, "exp");
      if (gpr_time_cmp(claims->exp, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
        goto error;
    } else if (strcmp(cur->key, "nbf") == 0) {
      claims->nbf = validate_time_field(cur, "nbf");
      if (gpr_time_cmp(claims->nbf, gpr_time_0(GPR_CLOCK_REALTIME)) == 0)
        goto error;
    }
  }
  return claims;

error:
  grpc_jwt_claims_destroy(claims);
  return NULL;
}
Example #30
static void destroy_server(grpc_rb_server* server, gpr_timespec deadline) {
  grpc_event ev;
  // This can be started by app or implicitly by GC. Avoid a race between these.
  if (gpr_atm_full_fetch_add(&server->shutdown_started, (gpr_atm)1) == 0) {
    if (server->wrapped != NULL) {
      grpc_server_shutdown_and_notify(server->wrapped, server->queue, NULL);
      ev = rb_completion_queue_pluck(server->queue, NULL, deadline, NULL);
      if (ev.type == GRPC_QUEUE_TIMEOUT) {
        grpc_server_cancel_all_calls(server->wrapped);
        rb_completion_queue_pluck(server->queue, NULL,
                                  gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      }
      grpc_server_destroy(server->wrapped);
      grpc_rb_completion_queue_destroy(server->queue);
      server->wrapped = NULL;
      server->queue = NULL;
    }
  }
}
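The fetch-and-add guard above is a run-once idiom: only the caller that
observes the old value 0 proceeds, so an explicit application shutdown and a
later GC finalizer cannot both tear the server down. A minimal sketch of the
same pattern with hypothetical names:

  static gpr_atm shutdown_started = 0; /* 0 until the first shutdown attempt */

  static void shutdown_once(void) {
    if (gpr_atm_full_fetch_add(&shutdown_started, (gpr_atm)1) != 0) {
      return; /* another caller already started shutdown */
    }
    /* ... perform the actual teardown exactly once ... */
  }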