Example no. 1
static gpr_timespec n_seconds_from_now(int n) {
  return grpc_timeout_seconds_to_deadline(n);
}
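For reference, a deadline helper like this can be approximated with the public gpr time API alone. The sketch below is an assumption, not the library's implementation (gRPC's real test utility may also scale the timeout by a slowdown factor on slow platforms):

#include <grpc/support/time.h>

/* Approximation (assumption): offset the monotonic clock by n seconds. */
static gpr_timespec approx_n_seconds_from_now(int n) {
  return gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_time_from_seconds(n, GPR_TIMESPAN));
}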
Example no. 2
int main(int argc, char **argv) {
  grpc_channel *chan;
  grpc_call *call;
  grpc_completion_queue *cq;
  cq_verifier *cqv;
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  char *peer;

  grpc_test_init(argc, argv);
  grpc_init();

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);

  chan = grpc_lame_client_channel_create(
      "lampoon:national", GRPC_STATUS_UNKNOWN, "Rpc sent on a lame channel.");
  GPR_ASSERT(chan);

  test_transport_op(chan);

  GPR_ASSERT(GRPC_CHANNEL_SHUTDOWN ==
             grpc_channel_check_connectivity_state(chan, 0));

  cq = grpc_completion_queue_create(NULL);

  grpc_slice host = grpc_slice_from_static_string("anywhere");
  call = grpc_channel_create_call(chan, NULL, GRPC_PROPAGATE_DEFAULTS, cq,
                                  grpc_slice_from_static_string("/Foo"), &host,
                                  grpc_timeout_seconds_to_deadline(100), NULL);
  GPR_ASSERT(call);
  cqv = cq_verifier_create(cq);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(call, ops, (size_t)(op - ops), tag(1), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);

  /* the call should immediately fail */
  CQ_EXPECT_COMPLETION(cqv, tag(1), 0);
  cq_verify(cqv);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(call, ops, (size_t)(op - ops), tag(2), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);

  /* the call should immediately fail */
  CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
  cq_verify(cqv);

  peer = grpc_call_get_peer(call);
  GPR_ASSERT(strcmp(peer, "lampoon:national") == 0);
  gpr_free(peer);

  grpc_call_destroy(call);
  grpc_channel_destroy(chan);
  cq_verifier_destroy(cqv);
  grpc_completion_queue_destroy(cq);

  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_slice_unref(details);

  grpc_shutdown();

  return 0;
}
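The batches above pass tag(1) and tag(2) as completion queue tags, but the helper itself is not shown in this excerpt. A minimal version, assuming the usual test scaffolding where tags are just small integers cast through intptr_t, could look like this:

#include <stdint.h>

/* Hypothetical tag helper: completion queue tags are opaque pointers, so
   small integers are round-tripped through intptr_t. */
static void *tag(intptr_t t) { return (void *)t; }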
Example no. 3
static gpr_timespec test_deadline(void) {
  return grpc_timeout_seconds_to_deadline(10);
}
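A deadline helper like test_deadline() is typically handed to completion queue calls. Here is a minimal sketch, assuming <grpc/grpc.h> and the helper above are in scope, of a hypothetical routine that drains a queue until it shuts down or the deadline passes:

/* Hypothetical: pull events until shutdown or until test_deadline() expires. */
static void drain_cq(grpc_completion_queue *cq) {
  grpc_event ev;
  do {
    ev = grpc_completion_queue_next(cq, test_deadline(), NULL);
  } while (ev.type != GRPC_QUEUE_SHUTDOWN && ev.type != GRPC_QUEUE_TIMEOUT);
}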
Example no. 4
static void run_test(bool wait_for_ready, bool use_service_config) {
  grpc_channel *chan;
  grpc_call *call;
  grpc_completion_queue *cq;
  cq_verifier *cqv;
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array trailing_metadata_recv;
  grpc_status_code status;
  grpc_slice details;

  gpr_log(GPR_INFO, "TEST: wait_for_ready=%d use_service_config=%d",
          wait_for_ready, use_service_config);

  grpc_init();

  grpc_metadata_array_init(&trailing_metadata_recv);

  cq = grpc_completion_queue_create_for_next(NULL);
  cqv = cq_verifier_create(cq);

  /* if using service config, create channel args */
  grpc_channel_args *args = NULL;
  if (use_service_config) {
    GPR_ASSERT(wait_for_ready);
    grpc_arg arg;
    arg.type = GRPC_ARG_STRING;
    arg.key = GRPC_ARG_SERVICE_CONFIG;
    arg.value.string =
        "{\n"
        "  \"methodConfig\": [ {\n"
        "    \"name\": [\n"
        "      { \"service\": \"service\", \"method\": \"method\" }\n"
        "    ],\n"
        "    \"waitForReady\": true\n"
        "  } ]\n"
        "}";
    args = grpc_channel_args_copy_and_add(args, &arg, 1);
  }

  /* create a channel and a call to a port which will refuse the connection */
  int port = grpc_pick_unused_port_or_die();
  char *addr;
  gpr_join_host_port(&addr, "127.0.0.1", port);
  gpr_log(GPR_INFO, "server: %s", addr);
  chan = grpc_insecure_channel_create(addr, args, NULL);
  grpc_slice host = grpc_slice_from_static_string("nonexistant");
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(2);
  call = grpc_channel_create_call(
      chan, NULL, GRPC_PROPAGATE_DEFAULTS, cq,
      grpc_slice_from_static_string("/service/method"), &host, deadline, NULL);

  gpr_free(addr);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = (wait_for_ready && !use_service_config)
                  ? GRPC_INITIAL_METADATA_WAIT_FOR_READY
                  : 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(
                                 call, ops, (size_t)(op - ops), tag(1), NULL));
  /* verify that all tags get completed */
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);

  if (wait_for_ready) {
    GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);
  } else {
    GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
  }

  grpc_completion_queue_shutdown(cq);
  while (
      grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL)
          .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
  grpc_call_unref(call);
  grpc_channel_destroy(chan);
  cq_verifier_destroy(cqv);

  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&trailing_metadata_recv);

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    if (args != NULL) grpc_channel_args_destroy(&exec_ctx, args);
    grpc_exec_ctx_finish(&exec_ctx);
  }

  grpc_shutdown();
}
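A driver for run_test() is not shown in this excerpt. The sketch below is an assumption covering the three combinations permitted by the GPR_ASSERT(wait_for_ready) guard on the service-config path:

/* Hypothetical driver: (false, true) is deliberately skipped because
   run_test() asserts wait_for_ready whenever use_service_config is set. */
int main(int argc, char **argv) {
  grpc_test_init(argc, argv);
  run_test(false, false); /* fail fast with UNAVAILABLE */
  run_test(true, false);  /* wait_for_ready via the metadata flag */
  run_test(true, true);   /* wait_for_ready via the service config */
  return 0;
}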
Example no. 5
/* Do a read_test, then release fd and try to read/write again. Verify that
   grpc_tcp_fd() is available before the fd is released. */
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure fd_released_cb;
  int fd_released_done = 0;
  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
                    grpc_schedule_on_exec_ctx);

  gpr_log(GPR_INFO,
          "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("release_fd_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
                       slice_size, "test");
  GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
            state.read_bytes, state.target_read_bytes);
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_mu_lock(g_mu);
  while (!fd_released_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
  }
  gpr_mu_unlock(g_mu);
  GPR_ASSERT(fd_released_done == 1);
  GPR_ASSERT(fd == sv[1]);
  grpc_exec_ctx_finish(&exec_ctx);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
}
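The closure built at the top of this function points at on_fd_released(), which lies outside the excerpt. A sketch, under the assumption that it simply records completion and kicks the shared pollset so the loop waiting on fd_released_done wakes up (the exec_ctx-style closure signature of this gRPC vintage is also assumed):

/* Hypothetical fd-released callback: flag completion and wake the poller. */
static void on_fd_released(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
  int *done = (int *)arg;
  *done = 1;
  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL));
}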
Example no. 6
/* Do both reading and writing using the grpc_endpoint API.

   This also includes a test of the shutdown behavior.
 */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, bool shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_log(GPR_DEBUG, "num_bytes=%" PRIuPTR " write_size=%" PRIuPTR
                     " slice_size=%" PRIuPTR " shutdown=%d",
          num_bytes, write_size, slice_size, shutdown);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %" PRIuPTR
                      " bytes, slice size %" PRIuPTR,
            num_bytes, slice_size);
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;
  GRPC_CLOSURE_INIT(&state.done_read, read_and_write_test_read_handler, &state,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&state.done_write, read_and_write_test_write_handler,
                    &state, grpc_schedule_on_exec_ctx);
  grpc_slice_buffer_init(&state.outgoing);
  grpc_slice_buffer_init(&state.incoming);

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&exec_ctx, &state, GRPC_ERROR_NONE);
  grpc_exec_ctx_flush(&exec_ctx);

  grpc_endpoint_read(&exec_ctx, state.read_ep, &state.incoming,
                     &state.done_read);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(
        &exec_ctx, state.read_ep,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(
        &exec_ctx, state.write_ep,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  }
  grpc_exec_ctx_flush(&exec_ctx);

  gpr_mu_lock(g_mu);
  while (!state.read_done || !state.write_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
  }
  gpr_mu_unlock(g_mu);
  grpc_exec_ctx_flush(&exec_ctx);

  end_test(config);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.outgoing);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, state.read_ep);
  grpc_endpoint_destroy(&exec_ctx, state.write_ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
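For context, a harness might invoke read_and_write_test() for both the normal and the shutdown path. The sizes below are illustrative assumptions, not values taken from the original test:

/* Illustrative invocations (assumed sizes): a bulk transfer, a small-slice
   transfer, and a run that exercises the shutdown path. */
static void run_read_and_write_suite(grpc_endpoint_test_config config) {
  read_and_write_test(config, 10000000, 100000, 8192, false);
  read_and_write_test(config, 1000000, 100000, 1, false);
  read_and_write_test(config, 100000, 100, 8192, true);
}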
Example no. 7
  grpc_arg server_a[] = {{.type = GRPC_ARG_INTEGER,
                          .key = GRPC_ARG_MAX_CONNECTION_AGE_MS,
                          .value.integer = MAX_CONNECTION_AGE_MS},
                         {.type = GRPC_ARG_INTEGER,
                          .key = GRPC_ARG_MAX_CONNECTION_AGE_GRACE_MS,
                          .value.integer = MAX_CONNECTION_AGE_GRACE_MS},
                         {.type = GRPC_ARG_INTEGER,
                          .key = GRPC_ARG_MAX_CONNECTION_IDLE_MS,
                          .value.integer = MAX_CONNECTION_IDLE_MS}};
  grpc_channel_args server_args = {.num_args = GPR_ARRAY_SIZE(server_a),
                                   .args = server_a};

  config.init_client(&f, NULL);
  config.init_server(&f, &server_args);

  grpc_call *c;
  grpc_call *s;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(CALL_DEADLINE_S);
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  int was_cancelled = 2;

  c = grpc_channel_create_call(
      f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
      grpc_slice_from_static_string("/foo"),
      get_host_override_slice("foo.test.google.fr:1234", config), deadline,
      NULL);
Example no. 8
static gpr_timespec n_seconds_time(int seconds) {
  return grpc_timeout_seconds_to_deadline(seconds);
}
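A typical consumer of such a helper, sketched here as an assumption rather than code from the original test, is a pluck on a completion queue bounded by the returned deadline:

/* Hypothetical wrapper: wait up to five seconds for a specific tag. */
static grpc_event wait_for_tag(grpc_completion_queue *cq, void *tag_value) {
  return grpc_completion_queue_pluck(cq, tag_value, n_seconds_time(5), NULL);
}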