Code example #1
File: h2_full+trace.c  Project: aaronjheng/grpc
static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
    grpc_channel_args *client_args, grpc_channel_args *server_args) {
  grpc_end2end_test_fixture f;
  int port = grpc_pick_unused_port_or_die();
  fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data));
  memset(&f, 0, sizeof(f));

  gpr_join_host_port(&ffd->localaddr, "localhost", port);

  f.fixture_data = ffd;
  f.cq = grpc_completion_queue_create_for_next(NULL);
  f.shutdown_cq = grpc_completion_queue_create_for_pluck(NULL);

  return f;
}
Code example #2
File: rb_channel.c  Project: CCNITSilchar/grpc
/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core reconnects,
 * but in grpc-ruby, core never gets a thread until an RPC is made, because
 * Ruby only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls
 * "completion_queue_next" on registered "watch_channel_connectivity_state"
 * calls, so that c-core can reconnect if needed when there are no RPCs.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
void grpc_rb_channel_polling_thread_start() {
  VALUE background_thread = Qnil;

  GPR_ASSERT(!abort_channel_polling);
  GPR_ASSERT(!channel_polling_thread_started);
  GPR_ASSERT(channel_polling_cq == NULL);

  gpr_mu_init(&global_connection_polling_mu);
  gpr_cv_init(&global_connection_polling_cv);

  channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
  background_thread = rb_thread_create(run_poll_channels_loop, NULL);

  if (!RTEST(background_thread)) {
    gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                               NULL, NULL);
  }
}
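For context, here is a hypothetical sketch of what the background polling loop described in the comment above might look like: a thread body that keeps draining the dedicated completion queue with grpc_completion_queue_next so that connectivity watches registered on idle channels still get serviced. The globals and the function name are assumptions for illustration, not the actual rb_channel.c implementation (the real loop is run_poll_channels_loop, whose body is not shown here).

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Hypothetical globals mirroring the ones referenced above. */
static grpc_completion_queue *channel_polling_cq;
static int abort_channel_polling;

/* Illustrative thread body: pump the shared polling queue until told to stop.
 * Each GRPC_OP_COMPLETE event corresponds to a registered
 * grpc_channel_watch_connectivity_state() call, and servicing it gives
 * c-core a thread on which to reconnect idle channels. */
static void poll_channels_loop_sketch(void) {
  for (;;) {
    grpc_event ev = grpc_completion_queue_next(
        channel_polling_cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    if (abort_channel_polling || ev.type == GRPC_QUEUE_SHUTDOWN) {
      break;
    }
    /* GRPC_OP_COMPLETE or GRPC_QUEUE_TIMEOUT: nothing to do; the point is
     * simply to keep pumping the queue. */
  }
}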
Code example #3
void create_loop_destroy(void *addr) {
  for (int i = 0; i < NUM_OUTER_LOOPS; ++i) {
    grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
    grpc_channel *chan = grpc_insecure_channel_create((char *)addr, NULL, NULL);

    for (int j = 0; j < NUM_INNER_LOOPS; ++j) {
      gpr_timespec later_time =
          grpc_timeout_milliseconds_to_deadline(DELAY_MILLIS);
      grpc_connectivity_state state =
          grpc_channel_check_connectivity_state(chan, 1);
      grpc_channel_watch_connectivity_state(chan, state, later_time, cq, NULL);
      gpr_timespec poll_time =
          grpc_timeout_milliseconds_to_deadline(POLL_MILLIS);
      GPR_ASSERT(grpc_completion_queue_next(cq, poll_time, NULL).type ==
                 GRPC_OP_COMPLETE);
      /* check that the watcher from "watch state" was freed */
      GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(chan) == 0);
    }
    grpc_channel_destroy(chan);
    grpc_completion_queue_destroy(cq);
  }
}
Code example #4
void watches_with_short_timeouts(void *addr) {
  for (int i = 0; i < NUM_OUTER_LOOPS_SHORT_TIMEOUTS; ++i) {
    grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
    grpc_channel *chan = grpc_insecure_channel_create((char *)addr, NULL, NULL);

    for (int j = 0; j < NUM_INNER_LOOPS_SHORT_TIMEOUTS; ++j) {
      gpr_timespec later_time =
          grpc_timeout_milliseconds_to_deadline(DELAY_MILLIS_SHORT_TIMEOUTS);
      grpc_connectivity_state state =
          grpc_channel_check_connectivity_state(chan, 0);
      GPR_ASSERT(state == GRPC_CHANNEL_IDLE);
      grpc_channel_watch_connectivity_state(chan, state, later_time, cq, NULL);
      gpr_timespec poll_time =
          grpc_timeout_milliseconds_to_deadline(POLL_MILLIS_SHORT_TIMEOUTS);
      grpc_event ev = grpc_completion_queue_next(cq, poll_time, NULL);
      GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
      GPR_ASSERT(ev.success == false);
      /* check that the watcher from "watch state" was freed */
      GPR_ASSERT(grpc_channel_num_external_connectivity_watchers(chan) == 0);
    }
    grpc_channel_destroy(chan);
    grpc_completion_queue_destroy(cq);
  }
}
Code example #5
File: lb_policies_test.c  Project: aaronjheng/grpc
static servers_fixture *setup_servers(const char *server_host,
                                      request_data *rdata,
                                      const size_t num_servers) {
  servers_fixture *f = gpr_malloc(sizeof(servers_fixture));
  size_t i;

  f->num_servers = num_servers;
  f->server_calls = gpr_malloc(sizeof(grpc_call *) * num_servers);
  f->request_metadata_recv =
      gpr_malloc(sizeof(grpc_metadata_array) * num_servers);
  /* Create servers. */
  f->servers = gpr_malloc(sizeof(grpc_server *) * num_servers);
  f->servers_hostports = gpr_malloc(sizeof(char *) * num_servers);
  f->cq = grpc_completion_queue_create_for_next(NULL);
  f->shutdown_cq = grpc_completion_queue_create_for_pluck(NULL);
  for (i = 0; i < num_servers; i++) {
    grpc_metadata_array_init(&f->request_metadata_recv[i]);
    gpr_join_host_port(&f->servers_hostports[i], server_host,
                       grpc_pick_unused_port_or_die());
    f->servers[i] = 0;
    revive_server(f, rdata, i);
  }
  return f;
}
Code example #6
File: grpc_csharp_ext.c  Project: grpc/grpc
GPR_EXPORT grpc_completion_queue* GPR_CALLTYPE
grpcsharp_completion_queue_create_async(void) {
  return grpc_completion_queue_create_for_next(NULL);
}
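The examples above and below all follow the same lifecycle for a queue created with grpc_completion_queue_create_for_next: create it, drive it with grpc_completion_queue_next, then shut it down and drain every remaining event before destroying it. The following standalone sketch illustrates that pattern; it is an assumption-based minimal example, not taken from any of the projects listed here.

#include <grpc/grpc.h>
#include <grpc/support/time.h>

int main(void) {
  grpc_init();
  grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);

  /* ... start calls or connectivity watches that post events onto cq ... */

  /* Shut down, then drain until GRPC_QUEUE_SHUTDOWN is returned; every
   * pending tag must be consumed before the queue may be destroyed. */
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    NULL)
             .type != GRPC_QUEUE_SHUTDOWN) {
  }
  grpc_completion_queue_destroy(cq);

  grpc_shutdown();
  return 0;
}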
Code example #7
int run_concurrent_connectivity_test() {
  struct server_thread_args args;
  memset(&args, 0, sizeof(args));

  grpc_init();

  gpr_thd_id threads[NUM_THREADS];
  gpr_thd_id server;

  char *localhost = gpr_strdup("localhost:54321");
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);

  /* First round, no server */
  gpr_log(GPR_DEBUG, "Wave 1");
  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_new(&threads[i], create_loop_destroy, localhost, &options);
  }
  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_join(threads[i]);
  }
  gpr_free(localhost);

  /* Second round, actual grpc server */
  gpr_log(GPR_DEBUG, "Wave 2");
  int port = grpc_pick_unused_port_or_die();
  gpr_asprintf(&args.addr, "localhost:%d", port);
  args.server = grpc_server_create(NULL, NULL);
  grpc_server_add_insecure_http2_port(args.server, args.addr);
  args.cq = grpc_completion_queue_create_for_next(NULL);
  grpc_server_register_completion_queue(args.server, args.cq, NULL);
  grpc_server_start(args.server);
  gpr_thd_new(&server, server_thread, &args, &options);

  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_new(&threads[i], create_loop_destroy, args.addr, &options);
  }
  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_join(threads[i]);
  }
  grpc_server_shutdown_and_notify(args.server, args.cq, tag(0xd1e));

  gpr_thd_join(server);
  grpc_server_destroy(args.server);
  grpc_completion_queue_destroy(args.cq);
  gpr_free(args.addr);

  /* Third round, bogus tcp server */
  gpr_log(GPR_DEBUG, "Wave 3");
  args.pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(args.pollset, &args.mu);
  gpr_event_init(&args.ready);
  gpr_thd_new(&server, bad_server_thread, &args, &options);
  gpr_event_wait(&args.ready, gpr_inf_future(GPR_CLOCK_MONOTONIC));

  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_new(&threads[i], create_loop_destroy, args.addr, &options);
  }
  for (size_t i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_join(threads[i]);
  }

  gpr_atm_rel_store(&args.stop, 1);
  gpr_thd_join(server);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_shutdown(&exec_ctx, args.pollset,
                        GRPC_CLOSURE_CREATE(done_pollset_shutdown, args.pollset,
                                            grpc_schedule_on_exec_ctx));
  grpc_exec_ctx_finish(&exec_ctx);

  grpc_shutdown();
  return 0;
}
Code example #8
static void run_test(bool wait_for_ready, bool use_service_config) {
  grpc_channel *chan;
  grpc_call *call;
  grpc_completion_queue *cq;
  cq_verifier *cqv;
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array trailing_metadata_recv;
  grpc_status_code status;
  grpc_slice details;

  gpr_log(GPR_INFO, "TEST: wait_for_ready=%d use_service_config=%d",
          wait_for_ready, use_service_config);

  grpc_init();

  grpc_metadata_array_init(&trailing_metadata_recv);

  cq = grpc_completion_queue_create_for_next(NULL);
  cqv = cq_verifier_create(cq);

  /* if using service config, create channel args */
  grpc_channel_args *args = NULL;
  if (use_service_config) {
    GPR_ASSERT(wait_for_ready);
    grpc_arg arg;
    arg.type = GRPC_ARG_STRING;
    arg.key = GRPC_ARG_SERVICE_CONFIG;
    arg.value.string =
        "{\n"
        "  \"methodConfig\": [ {\n"
        "    \"name\": [\n"
        "      { \"service\": \"service\", \"method\": \"method\" }\n"
        "    ],\n"
        "    \"waitForReady\": true\n"
        "  } ]\n"
        "}";
    args = grpc_channel_args_copy_and_add(args, &arg, 1);
  }

  /* create a channel and a call to a port that will refuse the connection */
  int port = grpc_pick_unused_port_or_die();
  char *addr;
  gpr_join_host_port(&addr, "127.0.0.1", port);
  gpr_log(GPR_INFO, "server: %s", addr);
  chan = grpc_insecure_channel_create(addr, args, NULL);
  grpc_slice host = grpc_slice_from_static_string("nonexistant");
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(2);
  call = grpc_channel_create_call(
      chan, NULL, GRPC_PROPAGATE_DEFAULTS, cq,
      grpc_slice_from_static_string("/service/method"), &host, deadline, NULL);

  gpr_free(addr);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = (wait_for_ready && !use_service_config)
                  ? GRPC_INITIAL_METADATA_WAIT_FOR_READY
                  : 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(
                                 call, ops, (size_t)(op - ops), tag(1), NULL));
  /* verify that all tags get completed */
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);

  if (wait_for_ready) {
    GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);
  } else {
    GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
  }

  grpc_completion_queue_shutdown(cq);
  while (
      grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)
          .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
  grpc_call_unref(call);
  grpc_channel_destroy(chan);
  cq_verifier_destroy(cqv);

  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&trailing_metadata_recv);

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    if (args != NULL) grpc_channel_args_destroy(&exec_ctx, args);
    grpc_exec_ctx_finish(&exec_ctx);
  }

  grpc_shutdown();
}
Code example #9
File: server_fuzzer.c  Project: endobson/grpc
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  grpc_test_only_set_slice_hash_seed(0);
  struct grpc_memory_counters counters;
  if (squelch) gpr_set_log_function(dont_log);
  if (leak_check) grpc_memory_counters_init();
  grpc_init();
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_executor_set_threading(&exec_ctx, false);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("server_fuzzer");
  grpc_endpoint *mock_endpoint =
      grpc_mock_endpoint_create(discard_write, resource_quota);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_mock_endpoint_put_read(
      &exec_ctx, mock_endpoint,
      grpc_slice_from_copied_buffer((const char *)data, size));

  grpc_server *server = grpc_server_create(NULL, NULL);
  grpc_completion_queue *cq = grpc_completion_queue_create_for_next(NULL);
  grpc_server_register_completion_queue(server, cq, NULL);
  // TODO(ctiller): add registered methods (one for POST, one for PUT)
  // void *registered_method =
  //    grpc_server_register_method(server, "/reg", NULL, 0);
  grpc_server_start(server);
  grpc_transport *transport =
      grpc_create_chttp2_transport(&exec_ctx, NULL, mock_endpoint, 0);
  grpc_server_setup_transport(&exec_ctx, server, transport, NULL, NULL);
  grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);

  grpc_call *call1 = NULL;
  grpc_call_details call_details1;
  grpc_metadata_array request_metadata1;
  grpc_call_details_init(&call_details1);
  grpc_metadata_array_init(&request_metadata1);
  int requested_calls = 0;

  GPR_ASSERT(GRPC_CALL_OK ==
             grpc_server_request_call(server, &call1, &call_details1,
                                      &request_metadata1, cq, cq, tag(1)));
  requested_calls++;

  grpc_event ev;
  while (1) {
    grpc_exec_ctx_flush(&exec_ctx);
    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
    switch (ev.type) {
      case GRPC_QUEUE_TIMEOUT:
        goto done;
      case GRPC_QUEUE_SHUTDOWN:
        break;
      case GRPC_OP_COMPLETE:
        switch (detag(ev.tag)) {
          case 1:
            requested_calls--;
            // TODO(ctiller): keep reading that call!
            break;
        }
    }
  }

done:
  if (call1 != NULL) grpc_call_unref(call1);
  grpc_call_details_destroy(&call_details1);
  grpc_metadata_array_destroy(&request_metadata1);
  grpc_server_shutdown_and_notify(server, cq, tag(0xdead));
  grpc_server_cancel_all_calls(server);
  for (int i = 0; i <= requested_calls; i++) {
    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
  }
  grpc_completion_queue_shutdown(cq);
  for (int i = 0; i <= requested_calls; i++) {
    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME), NULL);
    GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
  }
  grpc_server_destroy(server);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();
  if (leak_check) {
    counters = grpc_memory_counters_snapshot();
    grpc_memory_counters_destroy();
    GPR_ASSERT(counters.total_size_relative == 0);
  }
  return 0;
}