Example #1
static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
                                          grpc_channel_args *client_args,
                                          const char *query_args) {
  GPR_ASSERT(query_args == NULL);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_endpoint_pair *sfd = f->fixture_data;
  grpc_transport *transport;
  sp_client_setup cs;
  cs.client_args = client_args;
  cs.f = f;
  transport =
      grpc_create_chttp2_transport(&exec_ctx, client_args, sfd->client, 1);
  client_setup_transport(&exec_ctx, &cs, transport);
  GPR_ASSERT(f->client);
  grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #2
static void thread_main(void* arg) {
  grpc_end2end_http_proxy* proxy = arg;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  do {
    const gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    const gpr_timespec deadline =
        gpr_time_add(now, gpr_time_from_seconds(1, GPR_TIMESPAN));
    grpc_pollset_worker* worker = NULL;
    gpr_mu_lock(proxy->mu);
    GRPC_LOG_IF_ERROR(
        "grpc_pollset_work",
        grpc_pollset_work(&exec_ctx, proxy->pollset, &worker, now, deadline));
    gpr_mu_unlock(proxy->mu);
    grpc_exec_ctx_flush(&exec_ctx);
  } while (!gpr_atm_acq_load(&proxy->shutdown));
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #3
static void test_get_channel_info() {
  grpc_channel *channel =
      grpc_insecure_channel_create("ipv4:127.0.0.1:1234", NULL, NULL);
  // Ensures that resolver returns.
  grpc_channel_check_connectivity_state(channel, true /* try_to_connect */);
  // First, request no fields.  This is a no-op.
  grpc_channel_info channel_info;
  memset(&channel_info, 0, sizeof(channel_info));
  grpc_channel_get_info(channel, &channel_info);
  // Request LB policy name.
  char *lb_policy_name = NULL;
  channel_info.lb_policy_name = &lb_policy_name;
  grpc_channel_get_info(channel, &channel_info);
  GPR_ASSERT(lb_policy_name != NULL);
  GPR_ASSERT(strcmp(lb_policy_name, "pick_first") == 0);
  gpr_free(lb_policy_name);
  // Request service config, which does not exist, so we'll get nothing back.
  memset(&channel_info, 0, sizeof(channel_info));
  char *service_config_json = "dummy_string";
  channel_info.service_config_json = &service_config_json;
  grpc_channel_get_info(channel, &channel_info);
  GPR_ASSERT(service_config_json == NULL);
  // Recreate the channel such that it has a service config.
  grpc_channel_destroy(channel);
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = GRPC_ARG_SERVICE_CONFIG;
  arg.value.string = "{\"loadBalancingPolicy\": \"ROUND_ROBIN\"}";
  grpc_channel_args *args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
  channel = grpc_insecure_channel_create("ipv4:127.0.0.1:1234", args, NULL);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_channel_args_destroy(&exec_ctx, args);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  // Ensures that resolver returns.
  grpc_channel_check_connectivity_state(channel, true /* try_to_connect */);
  // Now request the service config again.
  grpc_channel_get_info(channel, &channel_info);
  GPR_ASSERT(service_config_json != NULL);
  GPR_ASSERT(strcmp(service_config_json, arg.value.string) == 0);
  gpr_free(service_config_json);
  // Clean up.
  grpc_channel_destroy(channel);
}
Example #4
int main(int argc, char **argv) {
  synchronizer sync;
  grpc_jwt_verifier *verifier;
  gpr_cmdline *cl;
  char *jwt = NULL;
  char *aud = NULL;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  cl = gpr_cmdline_create("JWT verifier tool");
  gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt);
  gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud);
  gpr_cmdline_parse(cl, argc, argv);
  if (jwt == NULL || aud == NULL) {
    print_usage_and_exit(cl, argv[0]);
  }

  verifier = grpc_jwt_verifier_create(NULL, 0);

  grpc_init();

  sync.pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(sync.pollset, &sync.mu);
  sync.is_done = 0;

  grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud,
                           on_jwt_verification_done, &sync);

  gpr_mu_lock(sync.mu);
  while (!sync.is_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
    gpr_mu_unlock(sync.mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(sync.mu);
  }
  gpr_mu_unlock(sync.mu);

  gpr_free(sync.pollset);

  grpc_jwt_verifier_destroy(verifier);
  gpr_cmdline_destroy(cl);
  return !sync.success;
}
Example #5
File: server.c Project: An-mol/grpc
void grpc_server_start(grpc_server *server) {
  listener *l;
  size_t i;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));

  server->pollsets = gpr_malloc(sizeof(grpc_pollset *) * server->cq_count);
  for (i = 0; i < server->cq_count; i++) {
    server->pollsets[i] = grpc_cq_pollset(server->cqs[i]);
  }

  for (l = server->listeners; l; l = l->next) {
    l->start(&exec_ctx, server, l->arg, server->pollsets, server->cq_count);
  }

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #6
static void test_subscribe_with_failure_then_destroy(void) {
  grpc_connectivity_state_tracker tracker;
  grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG);
  grpc_connectivity_state state = GRPC_CHANNEL_FATAL_FAILURE;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_log(GPR_DEBUG, "test_subscribe_with_failure_then_destroy");
  g_counter = 0;
  grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_FATAL_FAILURE, "xxx");
  GPR_ASSERT(0 == grpc_connectivity_state_notify_on_state_change(
                      &exec_ctx, &tracker, &state, closure));
  grpc_exec_ctx_flush(&exec_ctx);
  GPR_ASSERT(state == GRPC_CHANNEL_FATAL_FAILURE);
  GPR_ASSERT(g_counter == 0);
  grpc_connectivity_state_destroy(&exec_ctx, &tracker);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(state == GRPC_CHANNEL_FATAL_FAILURE);
  GPR_ASSERT(g_counter == 1);
}
Example #7
void test_tcp_server_destroy(test_tcp_server *server) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_timespec shutdown_deadline;
  grpc_closure do_nothing_cb;
  grpc_tcp_server_unref(&exec_ctx, server->tcp_server);
  grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
  shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                   gpr_time_from_seconds(5, GPR_TIMESPAN));
  while (!server->shutdown &&
         gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
    test_tcp_server_poll(server, 1);
  }
  grpc_pollset_shutdown(&exec_ctx, server->pollset, &do_nothing_cb);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_pollset_destroy(server->pollset);
  gpr_free(server->pollset);
  grpc_shutdown();
}
Example #8
void chttp2_init_server_load_reporting(grpc_end2end_test_fixture *f,
                                       grpc_channel_args *server_args) {
  load_reporting_fixture_data *ffd = f->fixture_data;
  grpc_arg arg = grpc_load_reporting_enable_arg();
  if (f->server) {
    grpc_server_destroy(f->server);
  }
  server_args = grpc_channel_args_copy_and_add(server_args, &arg, 1);
  f->server = grpc_server_create(server_args, NULL);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_channel_args_destroy(&exec_ctx, server_args);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_server_register_completion_queue(f->server, f->cq, NULL);
  GPR_ASSERT(grpc_server_add_insecure_http2_port(f->server, ffd->localaddr));
  grpc_server_start(f->server);
}
Example #9
static void test_parse_ipv4(const char *uri_text, const char *host,
                            unsigned short port) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_uri *uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
  grpc_resolved_address addr;
  char ntop_buf[INET_ADDRSTRLEN];

  GPR_ASSERT(1 == parse_ipv4(uri, &addr));
  struct sockaddr_in *addr_in = (struct sockaddr_in *)addr.addr;
  GPR_ASSERT(AF_INET == addr_in->sin_family);
  GPR_ASSERT(NULL != grpc_inet_ntop(AF_INET, &addr_in->sin_addr, ntop_buf,
                                    sizeof(ntop_buf)));
  GPR_ASSERT(0 == strcmp(ntop_buf, host));
  GPR_ASSERT(ntohs(addr_in->sin_port) == port);

  grpc_uri_destroy(uri);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #10
grpc_call_error grpc_call_start_batch(grpc_call *call, const grpc_op *ops,
                                      size_t nops, void *tag, void *reserved) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_call_error err;

  GRPC_API_TRACE(
      "grpc_call_start_batch(call=%p, ops=%p, nops=%lu, tag=%p, reserved=%p)",
      5, (call, ops, (unsigned long)nops, tag, reserved));

  if (reserved != NULL) {
    err = GRPC_CALL_ERROR;
  } else {
    err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0);
  }

  grpc_exec_ctx_finish(&exec_ctx);
  return err;
}
Example #11
static void test_no_op_with_port(void) {
  g_number_of_orphan_calls = 0;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  struct sockaddr_in addr;
  grpc_udp_server *s = grpc_udp_server_create();
  LOG_TEST("test_no_op_with_port");

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  GPR_ASSERT(grpc_udp_server_add_port(s, (struct sockaddr *)&addr, sizeof(addr),
                                      on_read, on_fd_orphaned));

  grpc_udp_server_destroy(&exec_ctx, s, NULL);
  grpc_exec_ctx_finish(&exec_ctx);

  /* The server had a single FD, which should have been orphaned. */
  GPR_ASSERT(g_number_of_orphan_calls == 1);
}
Example #12
static void ru_slice_unref(void *p) {
  ru_slice_refcount *rc = p;
  if (gpr_unref(&rc->refs)) {
    /* TODO(ctiller): this is dangerous, but I think safe for now:
       we have no guarantee here that we're at a safe point for creating an
       execution context, but we have no way of writing this code otherwise.
       In the future: consider lifting grpc_slice to grpc, and offering an
       internal_{ref,unref} pair that is execution context aware.
       Alternatively,
       make exec_ctx be thread local and 'do the right thing' (whatever that
       is)
       if NULL */
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, rc->resource_user, rc->size);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_free(rc);
  }
}
Example #13
int main(int argc, char **argv) {
  grpc_closure destroyed;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_test_init(argc, argv);
  grpc_init();
  g_pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(g_pollset, &g_mu);
  grpc_endpoint_tests(configs[0], g_pollset, g_mu);
  run_tests();
  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_shutdown();
  gpr_free(g_pollset);

  return 0;
}
Example #14
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                slice_size, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
Example #15
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
                       void *tag, void *reserved) {
  grpc_transport_op op;
  ping_result *pr = gpr_malloc(sizeof(*pr));
  grpc_channel_element *top_elem =
      grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GPR_ASSERT(reserved == NULL);
  memset(&op, 0, sizeof(op));
  pr->tag = tag;
  pr->cq = cq;
  grpc_closure_init(&pr->closure, ping_done, pr);
  op.send_ping = &pr->closure;
  op.bind_pollset = grpc_cq_pollset(cq);
  grpc_cq_begin_op(cq, tag);
  top_elem->filter->start_transport_op(&exec_ctx, top_elem, &op);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #16
static void on_connect(uv_stream_t *server, int status) {
  grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
  uv_tcp_t *client;
  grpc_endpoint *ep = NULL;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resolved_address peer_name;
  char *peer_name_string;
  int err;

  if (status < 0) {
    gpr_log(GPR_INFO, "Skipping on_accept due to error: %s",
            uv_strerror(status));
    return;
  }

  client = gpr_malloc(sizeof(uv_tcp_t));
  uv_tcp_init(uv_default_loop(), client);
  // UV documentation says this is guaranteed to succeed
  uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
  // If the server has not been started, we discard incoming connections
  if (sp->server->on_accept_cb == NULL) {
    uv_close((uv_handle_t *)client, accepted_connection_close_cb);
  } else {
    peer_name_string = NULL;
    memset(&peer_name, 0, sizeof(grpc_resolved_address));
    peer_name.len = sizeof(struct sockaddr_storage);
    err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
                             (int *)&peer_name.len);
    if (err == 0) {
      peer_name_string = grpc_sockaddr_to_uri(&peer_name);
    } else {
      gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
    }
    ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
    // Create acceptor.
    grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
    acceptor->from_server = sp->server;
    acceptor->port_index = sp->port_index;
    acceptor->fd_index = 0;
    sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
                             acceptor);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #17
/* called from application code */
static void on_md_processing_done(
    void *user_data, const grpc_metadata *consumed_md, size_t num_consumed_md,
    const grpc_metadata *response_md, size_t num_response_md,
    grpc_status_code status, const char *error_details) {
  grpc_call_element *elem = user_data;
  call_data *calld = elem->call_data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  /* TODO(jboeuf): Implement support for response_md. */
  if (response_md != NULL && num_response_md > 0) {
    gpr_log(GPR_INFO,
            "response_md in auth metadata processing not supported for now. "
            "Ignoring...");
  }

  if (status == GRPC_STATUS_OK) {
    calld->consumed_md = consumed_md;
    calld->num_consumed_md = num_consumed_md;
    grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
                               elem);
    grpc_metadata_array_destroy(&calld->md);
    calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 1);
  } else {
    gpr_slice message;
    grpc_transport_stream_op close_op;
    memset(&close_op, 0, sizeof(close_op));
    grpc_metadata_array_destroy(&calld->md);
    error_details = error_details != NULL
                        ? error_details
                        : "Authentication metadata processing failed.";
    message = gpr_slice_from_copied_string(error_details);
    calld->transport_op.send_initial_metadata = NULL;
    if (calld->transport_op.send_message != NULL) {
      grpc_byte_stream_destroy(calld->transport_op.send_message);
      calld->transport_op.send_message = NULL;
    }
    calld->transport_op.send_trailing_metadata = NULL;
    grpc_transport_stream_op_add_close(&close_op, status, &message);
    grpc_call_next_op(&exec_ctx, elem, &close_op);
    calld->on_done_recv->cb(&exec_ctx, calld->on_done_recv->cb_arg, 0);
  }

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #18
grpc_call *grpc_channel_create_call(grpc_channel *channel,
                                    grpc_call *parent_call,
                                    uint32_t propagation_mask,
                                    grpc_completion_queue *cq,
                                    grpc_slice method, const grpc_slice *host,
                                    gpr_timespec deadline, void *reserved) {
  GPR_ASSERT(!reserved);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_call *call = grpc_channel_create_call_internal(
      &exec_ctx, channel, parent_call, propagation_mask, cq, NULL,
      grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH,
                              grpc_slice_ref_internal(method)),
      host != NULL ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY,
                                             grpc_slice_ref_internal(*host))
                   : GRPC_MDNULL,
      deadline);
  grpc_exec_ctx_finish(&exec_ctx);
  return call;
}
Example #19
int main(int argc, char **argv) {
  grpc_closure destroyed;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_test_init(argc, argv);
  grpc_iomgr_init(&exec_ctx);
  grpc_iomgr_start(&exec_ctx);
  g_pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(g_pollset, &g_mu);
  test_grpc_fd();
  test_grpc_fd_change();
  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_free(g_pollset);
  grpc_iomgr_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  return 0;
}
Example #20
void grpc_server_destroy(grpc_server *server) {
  listener *l;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_mu_lock(&server->mu_global);
  GPR_ASSERT(gpr_atm_acq_load(&server->shutdown_flag) || !server->listeners);
  GPR_ASSERT(server->listeners_destroyed == num_listeners(server));

  while (server->listeners) {
    l = server->listeners;
    server->listeners = l->next;
    gpr_free(l);
  }

  gpr_mu_unlock(&server->mu_global);

  server_unref(&exec_ctx, server);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #21
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  if (cc->shutdown_called) {
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
    return;
  }
  cc->shutdown_called = 1;
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));

  if (gpr_unref(&cc->pending_events)) {
    gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
    grpc_pollset_shutdown(&exec_ctx, &cc->pollset, &cc->pollset_destroy_done);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #22
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
                       void *tag, void *reserved) {
  GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
                 (channel, cq, tag, reserved));
  grpc_transport_op *op = grpc_make_transport_op(NULL);
  ping_result *pr = gpr_malloc(sizeof(*pr));
  grpc_channel_element *top_elem =
      grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GPR_ASSERT(reserved == NULL);
  pr->tag = tag;
  pr->cq = cq;
  grpc_closure_init(&pr->closure, ping_done, pr);
  op->send_ping = &pr->closure;
  op->bind_pollset = grpc_cq_pollset(cq);
  grpc_cq_begin_op(cq, tag);
  top_elem->filter->start_transport_op(&exec_ctx, top_elem, op);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #23
static void test_no_op_with_port_and_start(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  struct sockaddr_in addr;
  grpc_tcp_server *s;
  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
  LOG_TEST("test_no_op_with_port_and_start");
  int port;

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;
  GPR_ASSERT(grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, sizeof(addr),
                                      &port) == GRPC_ERROR_NONE &&
             port > 0);

  grpc_tcp_server_start(&exec_ctx, s, NULL, 0, on_connect, NULL);

  grpc_tcp_server_unref(&exec_ctx, s);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #24
void grpc_inproc_transport_init(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
                    grpc_schedule_on_exec_ctx);
  g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);

  grpc_slice key_tmp = grpc_slice_from_static_string(":path");
  g_fake_path_key = grpc_slice_intern(key_tmp);
  grpc_slice_unref_internal(&exec_ctx, key_tmp);

  g_fake_path_value = grpc_slice_from_static_string("/");

  grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
  g_fake_auth_key = grpc_slice_intern(auth_tmp);
  grpc_slice_unref_internal(&exec_ctx, auth_tmp);

  g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #25
static void test_post(int port) {
  grpc_httpcli_request req;
  char *host;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  g_done = 0;
  gpr_log(GPR_INFO, "test_post");

  gpr_asprintf(&host, "localhost:%d", port);
  gpr_log(GPR_INFO, "posting to %s", host);

  memset(&req, 0, sizeof(req));
  req.host = host;
  req.ssl_host_override = "foo.test.google.fr";
  req.http.path = "/post";
  req.handshaker = &grpc_httpcli_ssl;

  grpc_http_response response;
  memset(&response, 0, sizeof(response));
  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
  grpc_httpcli_post(
      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
      n_seconds_time(15),
      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
      &response);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  gpr_mu_lock(g_mu);
  while (!g_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                          &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          n_seconds_time(20))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);
  gpr_free(host);
  grpc_http_response_destroy(&response);
}
Example #26
File: tcp_uv.c Project: yugui/grpc
static void read_callback(uv_stream_t *stream, ssize_t nread,
                          const uv_buf_t *buf) {
  grpc_slice sub;
  grpc_error *error;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_tcp *tcp = stream->data;
  grpc_closure *cb = tcp->read_cb;
  if (nread == 0) {
    // Nothing happened. Wait for the next callback
    return;
  }
  TCP_UNREF(&exec_ctx, tcp, "read");
  tcp->read_cb = NULL;
  // TODO(murgatroid99): figure out what the return value here means
  uv_read_stop(stream);
  if (nread == UV_EOF) {
    error = GRPC_ERROR_CREATE("EOF");
  } else if (nread > 0) {
    // Successful read
    sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
    grpc_slice_buffer_add(tcp->read_slices, sub);
    error = GRPC_ERROR_NONE;
    if (grpc_tcp_trace) {
      size_t i;
      const char *str = grpc_error_string(error);
      gpr_log(GPR_DEBUG, "read: error=%s", str);

      for (i = 0; i < tcp->read_slices->count; i++) {
        char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
                                     GPR_DUMP_HEX | GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
                dump);
        gpr_free(dump);
      }
    }
  } else {
    // nread < 0: Error
    error = GRPC_ERROR_CREATE("TCP Read failed");
  }
  grpc_closure_sched(&exec_ctx, cb, error);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #27
/* Takes ownership of the header, claims and signature. */
static verifier_cb_ctx *verifier_cb_ctx_create(
    grpc_jwt_verifier *verifier, grpc_pollset *pollset, jose_header *header,
    grpc_jwt_claims *claims, const char *audience, grpc_slice signature,
    const char *signed_jwt, size_t signed_jwt_len, void *user_data,
    grpc_jwt_verification_done_cb cb) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  verifier_cb_ctx *ctx = gpr_malloc(sizeof(verifier_cb_ctx));
  memset(ctx, 0, sizeof(verifier_cb_ctx));
  ctx->verifier = verifier;
  ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
  ctx->header = header;
  ctx->audience = gpr_strdup(audience);
  ctx->claims = claims;
  ctx->signature = signature;
  ctx->signed_data = grpc_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
  ctx->user_data = user_data;
  ctx->user_cb = cb;
  grpc_exec_ctx_finish(&exec_ctx);
  return ctx;
}
Example #28
void test_tcp_server_start(test_tcp_server *server, int port) {
  struct sockaddr_in addr;
  int port_added;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  addr.sin_family = AF_INET;
  addr.sin_port = htons((uint16_t)port);
  memset(&addr.sin_addr, 0, sizeof(addr.sin_addr));

  server->tcp_server = grpc_tcp_server_create(&server->shutdown_complete);
  port_added =
      grpc_tcp_server_add_port(server->tcp_server, &addr, sizeof(addr));
  GPR_ASSERT(port_added == port);

  grpc_tcp_server_start(&exec_ctx, server->tcp_server, server->pollsets, 1,
                        server->on_connect, server->cb_data);
  gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #29
void grpc_executor_shutdown() {
  int pending_join;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_mu_lock(&g_executor.mu);
  pending_join = g_executor.pending_join;
  g_executor.shutting_down = 1;
  gpr_mu_unlock(&g_executor.mu);
  /* we can release the lock at this point despite the access to the closure
   * list below because we aren't accepting new work */

  /* Execute pending callbacks, some may be performing cleanups */
  grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
  if (pending_join) {
    gpr_thd_join(g_executor.tid);
  }
  gpr_mu_destroy(&g_executor.mu);
}
Example #30
static void on_handshake_next_done_grpc_wrapper(
    tsi_result result, void *user_data, const unsigned char *bytes_to_send,
    size_t bytes_to_send_size, tsi_handshaker_result *handshaker_result) {
  security_handshaker *h = user_data;
  // This callback will be invoked by TSI in a non-grpc thread, so it's
  // safe to create our own exec_ctx here.
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(&h->mu);
  grpc_error *error =
      on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send,
                                    bytes_to_send_size, handshaker_result);
  if (error != GRPC_ERROR_NONE) {
    security_handshake_failed_locked(&exec_ctx, h, error);
    gpr_mu_unlock(&h->mu);
    security_handshaker_unref(&exec_ctx, h);
  } else {
    gpr_mu_unlock(&h->mu);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
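All of the examples above share the same exec_ctx lifecycle. A minimal sketch of that pattern, assuming the legacy grpc_exec_ctx API used throughout (the function name below is hypothetical):

static void exec_ctx_pattern_sketch(void) {
  /* Stack-allocate the execution context. */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  /* ... pass &exec_ctx to internal calls that may queue closures ... */
  /* Optionally drain queued work mid-function. */
  grpc_exec_ctx_flush(&exec_ctx);
  /* Always finish before the context goes out of scope so any
     remaining queued closures run. */
  grpc_exec_ctx_finish(&exec_ctx);
}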