Example #1
unsigned gpr_cpu_current_cpu(void) {
  /* TODO(jtattermusch): implement */
  gpr_log(GPR_ERROR, "Cannot determine current CPU");
  return 0;
}
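For contrast, a minimal sketch of a non-stub implementation on Linux, assuming sched_getcpu(3) is available (an illustration only, not gRPC's actual code):

#define _GNU_SOURCE
#include <sched.h>

unsigned gpr_cpu_current_cpu(void) {
  int cpu = sched_getcpu(); /* returns -1 with errno set on failure */
  return cpu < 0 ? 0 : (unsigned)cpu;
}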
Example #2
void grpc_iomgr_shutdown(void) {
  gpr_timespec shutdown_deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
  gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  grpc_iomgr_platform_flush();

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_root_object.next != &g_root_object) {
    if (gpr_time_cmp(
            gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
            gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      if (g_root_object.next != &g_root_object) {
        gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
                count_objects());
      }
      last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
    }
    if (grpc_timer_check(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC),
                         NULL)) {
      gpr_mu_unlock(&g_mu);
      grpc_exec_ctx_flush(&exec_ctx);
      gpr_mu_lock(&g_mu);
      continue;
    }
    if (g_root_object.next != &g_root_object) {
      gpr_timespec short_deadline = gpr_time_add(
          gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
      if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
        if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
          if (g_root_object.next != &g_root_object) {
            gpr_log(GPR_DEBUG,
                    "Failed to free %d iomgr objects before shutdown deadline: "
                    "memory leaks are likely",
                    count_objects());
            dump_objects("LEAKED");
            if (grpc_iomgr_abort_on_leaks()) {
              abort();
            }
          }
          break;
        }
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_timer_list_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);

  /* ensure all threads have left g_mu */
  gpr_mu_lock(&g_mu);
  gpr_mu_unlock(&g_mu);

  grpc_pollset_global_shutdown();
  grpc_iomgr_platform_shutdown();
  grpc_exec_ctx_global_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
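The wait loop above layers a short (100 ms) condition-variable timeout under a hard 10 s shutdown deadline, so it can log progress once per second while waiting. A stripped-down sketch of that pattern in plain pthreads, with all names hypothetical:

#include <pthread.h>
#include <time.h>

/* Wait until *done becomes nonzero or overall_deadline passes,
   waking at least every 100 ms so the caller can observe progress. */
static int wait_with_deadline(pthread_mutex_t *mu, pthread_cond_t *cv,
                              int *done, struct timespec overall_deadline) {
  pthread_mutex_lock(mu);
  while (!*done) {
    struct timespec now, short_deadline;
    clock_gettime(CLOCK_REALTIME, &now);
    if (now.tv_sec > overall_deadline.tv_sec ||
        (now.tv_sec == overall_deadline.tv_sec &&
         now.tv_nsec >= overall_deadline.tv_nsec)) {
      pthread_mutex_unlock(mu);
      return 0; /* overall deadline passed; give up */
    }
    short_deadline = now;
    short_deadline.tv_nsec += 100 * 1000 * 1000; /* 100 ms slice */
    if (short_deadline.tv_nsec >= 1000000000) {
      short_deadline.tv_sec++;
      short_deadline.tv_nsec -= 1000000000;
    }
    pthread_cond_timedwait(cv, mu, &short_deadline);
  }
  pthread_mutex_unlock(mu);
  return 1; /* condition reached */
}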
Example #3
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
                    int line) {
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP   ref %p : %s %d -> %d", tcp,
          reason, tcp->refcount.count, tcp->refcount.count + 1);
  gpr_ref(&tcp->refcount);
}
Example #4
static int do_iocp_work() {
  BOOL success;
  DWORD bytes = 0;
  DWORD flags = 0;
  ULONG_PTR completion_key;
  LPOVERLAPPED overlapped;
  gpr_timespec wait_time = gpr_inf_future;
  grpc_winsocket *socket;
  grpc_winsocket_callback_info *info;
  void (*f)(void *, int) = NULL;
  void *opaque = NULL;
  success = GetQueuedCompletionStatus(g_iocp, &bytes,
                                      &completion_key, &overlapped,
                                      gpr_time_to_millis(wait_time));
  if (!success && !overlapped) {
    /* The deadline was reached. */
    return 0;
  }
  GPR_ASSERT(completion_key && overlapped);
  if (overlapped == &g_iocp_custom_overlap) {
    if (completion_key == (ULONG_PTR)&g_iocp_kick_token) {
      /* We were awoken from a kick. */
      gpr_log(GPR_DEBUG, "do_iocp_work - got a kick");
      return 1;
    }
    gpr_log(GPR_ERROR, "Unknown custom completion key.");
    abort();
  }

  socket = (grpc_winsocket *)completion_key;
  if (overlapped == &socket->write_info.overlapped) {
    gpr_log(GPR_DEBUG, "do_iocp_work - got write packet");
    info = &socket->write_info;
  } else if (overlapped == &socket->read_info.overlapped) {
    gpr_log(GPR_DEBUG, "do_iocp_work - got read packet");
    info = &socket->read_info;
  } else {
    gpr_log(GPR_ERROR, "Unknown IOCP operation");
    abort();
  }
  success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
                                   FALSE, &flags);
  gpr_log(GPR_DEBUG, "bytes: %u, flags: %u - op %s", bytes, flags,
          success ? "succeeded" : "failed");
  info->bytes_transfered = bytes;
  info->wsa_error = success ? 0 : WSAGetLastError();
  GPR_ASSERT(overlapped == &info->overlapped);
  gpr_mu_lock(&socket->state_mu);
  GPR_ASSERT(!info->has_pending_iocp);
  if (info->cb) {
    f = info->cb;
    opaque = info->opaque;
    info->cb = NULL;
  } else {
    info->has_pending_iocp = 1;
  }
  gpr_mu_unlock(&socket->state_mu);
  if (f) f(opaque, 1);

  return 1;
}
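The kick branch works because another thread posts a completion carrying the sentinel key and overlapped structure. A sketch of what that posting side might look like, reusing g_iocp, g_iocp_kick_token and g_iocp_custom_overlap from above (the real kick function is not shown here):

static void iocp_kick(void) {
  BOOL success = PostQueuedCompletionStatus(
      g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token, &g_iocp_custom_overlap);
  if (!success) {
    gpr_log(GPR_ERROR, "PostQueuedCompletionStatus failed");
    abort();
  }
}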
Example #5
static void on_handshake_data_received_from_peer(
    void *setup, gpr_slice *slices, size_t nslices,
    grpc_endpoint_cb_status error) {
  grpc_secure_transport_setup *s = setup;
  size_t consumed_slice_size = 0;
  tsi_result result = TSI_OK;
  size_t i;
  size_t num_left_overs;
  int has_left_overs_in_current_slice = 0;

  if (error != GRPC_ENDPOINT_CB_OK) {
    gpr_log(GPR_ERROR, "Read failed.");
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  for (i = 0; i < nslices; i++) {
    consumed_slice_size = GPR_SLICE_LENGTH(slices[i]);
    result = tsi_handshaker_process_bytes_from_peer(
        s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size);
    if (!tsi_handshaker_is_in_progress(s->handshaker)) break;
  }

  if (tsi_handshaker_is_in_progress(s->handshaker)) {
    /* We may need more data. */
    if (result == TSI_INCOMPLETE_DATA) {
      /* TODO(klempner,jboeuf): This should probably use the client setup
         deadline */
      grpc_endpoint_notify_on_read(s->endpoint,
                                   on_handshake_data_received_from_peer, setup);
      cleanup_slices(slices, nslices);
      return;
    } else {
      send_handshake_bytes_to_peer(s);
      cleanup_slices(slices, nslices);
      return;
    }
  }

  if (result != TSI_OK) {
    gpr_log(GPR_ERROR, "Handshake failed with error %s",
            tsi_result_to_string(result));
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  /* Handshake is done and successful at this point. */
  has_left_overs_in_current_slice =
      (consumed_slice_size < GPR_SLICE_LENGTH(slices[i]));
  num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1;
  if (num_left_overs == 0) {
    cleanup_slices(slices, nslices);
    check_peer(s);
    return;
  }
  cleanup_slices(slices, nslices - num_left_overs);

  /* Put the leftovers in our buffer (ownership transferred). */
  if (has_left_overs_in_current_slice) {
    gpr_slice_buffer_add(&s->left_overs,
                         gpr_slice_split_tail(&slices[i], consumed_slice_size));
    gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */
  }
  gpr_slice_buffer_addn(&s->left_overs, &slices[i + 1],
                        num_left_overs - has_left_overs_in_current_slice);
  check_peer(s);
}
Example #6
#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx,
                                         const char *file, int line,
                                         const char *reason) {
  if (ctx == NULL) return NULL;
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "AUTH_CONTEXT:%p   ref %d -> %d %s", ctx, (int)ctx->refcount.count,
          (int)ctx->refcount.count + 1, reason);
#else
grpc_auth_context *grpc_auth_context_ref(grpc_auth_context *ctx) {
  if (ctx == NULL) return NULL;
#endif
  gpr_ref(&ctx->refcount);
  return ctx;
}

#ifdef GRPC_AUTH_CONTEXT_REFCOUNT_DEBUG
void grpc_auth_context_unref(grpc_auth_context *ctx, const char *file, int line,
                             const char *reason) {
  if (ctx == NULL) return;
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "AUTH_CONTEXT:%p unref %d -> %d %s", ctx, (int)ctx->refcount.count,
          (int)ctx->refcount.count - 1, reason);
#else
void grpc_auth_context_unref(grpc_auth_context *ctx) {
  if (ctx == NULL) return;
#endif
  if (gpr_unref(&ctx->refcount)) {
    size_t i;
    GRPC_AUTH_CONTEXT_UNREF(ctx->chained, "chained");
    if (ctx->properties.array != NULL) {
      for (i = 0; i < ctx->properties.count; i++) {
        grpc_auth_property_reset(&ctx->properties.array[i]);
      }
      gpr_free(ctx->properties.array);
    }
    gpr_free(ctx);
  }
}

const char *grpc_auth_context_peer_identity_property_name(
    const grpc_auth_context *ctx) {
  return ctx->peer_identity_property_name;
}

int grpc_auth_context_set_peer_identity_property_name(grpc_auth_context *ctx,
                                                      const char *name) {
  grpc_auth_property_iterator it =
      grpc_auth_context_find_properties_by_name(ctx, name);
  const grpc_auth_property *prop = grpc_auth_property_iterator_next(&it);
  if (prop == NULL) {
    gpr_log(GPR_ERROR, "Property name %s not found in auth context.",
            name != NULL ? name : "NULL");
    return 0;
  }
  ctx->peer_identity_property_name = prop->name;
  return 1;
}

int grpc_auth_context_peer_is_authenticated(const grpc_auth_context *ctx) {
  return ctx->peer_identity_property_name == NULL ? 0 : 1;
}

grpc_auth_property_iterator grpc_auth_context_property_iterator(
    const grpc_auth_context *ctx) {
  grpc_auth_property_iterator it = empty_iterator;
  if (ctx == NULL) return it;
  it.ctx = ctx;
  return it;
}
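A short, hypothetical usage sketch of the iterator API above, assuming grpc_auth_property exposes NUL-terminated name and value fields:

static void log_auth_properties(const grpc_auth_context *ctx) {
  grpc_auth_property_iterator it = grpc_auth_context_property_iterator(ctx);
  const grpc_auth_property *prop;
  while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
    gpr_log(GPR_INFO, "auth property %s: %s", prop->name, prop->value);
  }
}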
Example #7
static void test_threading(int producers, int consumers) {
  test_thread_options *options =
      gpr_malloc((producers + consumers) * sizeof(test_thread_options));
  gpr_event phase1 = GPR_EVENT_INIT;
  gpr_event phase2 = GPR_EVENT_INIT;
  grpc_completion_queue *cc = grpc_completion_queue_create();
  int i;
  int total_consumed = 0;
  static int optid = 101;

  gpr_log(GPR_INFO, "%s: %d producers, %d consumers", "test_threading",
          producers, consumers);

  /* start all threads: they will wait for phase1 */
  for (i = 0; i < producers + consumers; i++) {
    gpr_thd_id id;
    gpr_event_init(&options[i].on_started);
    gpr_event_init(&options[i].on_phase1_done);
    gpr_event_init(&options[i].on_finished);
    options[i].phase1 = &phase1;
    options[i].phase2 = &phase2;
    options[i].events_triggered = 0;
    options[i].cc = cc;
    options[i].id = optid++;
    GPR_ASSERT(gpr_thd_new(&id,
                           i < producers ? producer_thread : consumer_thread,
                           options + i, NULL));
    gpr_event_wait(&options[i].on_started, ten_seconds_time());
  }

  /* start phase1: producers will pre-declare all operations they will
     complete */
  gpr_log(GPR_INFO, "start phase 1");
  gpr_event_set(&phase1, (void *)(gpr_intptr)1);

  gpr_log(GPR_INFO, "wait phase 1");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_phase1_done, ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 1");

  /* start phase2: operations will complete, and consumers will consume them */
  gpr_log(GPR_INFO, "start phase 2");
  gpr_event_set(&phase2, (void *)(gpr_intptr)1);

  /* in parallel, we shutdown the completion channel - all events should still
     be consumed */
  grpc_completion_queue_shutdown(cc);

  /* join all threads */
  gpr_log(GPR_INFO, "wait phase 2");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_finished, ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 2");

  /* destroy the completion channel */
  grpc_completion_queue_destroy(cc);

  /* verify that everything was produced and consumed */
  for (i = 0; i < producers + consumers; i++) {
    if (i < producers) {
      GPR_ASSERT(options[i].events_triggered == TEST_THREAD_EVENTS);
    } else {
      total_consumed += options[i].events_triggered;
    }
  }
  GPR_ASSERT(total_consumed == producers * TEST_THREAD_EVENTS);

  gpr_free(options);
}
Example #8
// Test with response larger than the limit.
// If send_limit is true, applies send limit on server; otherwise, applies
// recv limit on client.
static void test_max_message_length_on_response(grpc_end2end_test_config config,
                                                bool send_limit,
                                                bool use_service_config,
                                                bool use_string_json_value) {
  gpr_log(GPR_INFO,
          "testing response with send_limit=%d use_service_config=%d "
          "use_string_json_value=%d",
          send_limit, use_service_config, use_string_json_value);

  grpc_end2end_test_fixture f;
  grpc_call *c = NULL;
  grpc_call *s = NULL;
  cq_verifier *cqv;
  grpc_op ops[6];
  grpc_op *op;
  grpc_slice response_payload_slice =
      grpc_slice_from_copied_string("hello world");
  grpc_byte_buffer *response_payload =
      grpc_raw_byte_buffer_create(&response_payload_slice, 1);
  grpc_byte_buffer *recv_payload = NULL;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  int was_cancelled = 2;

  grpc_channel_args *client_args = NULL;
  grpc_channel_args *server_args = NULL;
  if (use_service_config) {
    // We don't currently support service configs on the server side.
    GPR_ASSERT(!send_limit);
    grpc_arg arg;
    arg.type = GRPC_ARG_STRING;
    arg.key = GRPC_ARG_SERVICE_CONFIG;
    arg.value.string =
        use_string_json_value
            ? "{\n"
              "  \"methodConfig\": [ {\n"
              "    \"name\": [\n"
              "      { \"service\": \"service\", \"method\": \"method\" }\n"
              "    ],\n"
              "    \"maxResponseMessageBytes\": \"5\"\n"
              "  } ]\n"
              "}"
            : "{\n"
              "  \"methodConfig\": [ {\n"
              "    \"name\": [\n"
              "      { \"service\": \"service\", \"method\": \"method\" }\n"
              "    ],\n"
              "    \"maxResponseMessageBytes\": 5\n"
              "  } ]\n"
              "}";
    client_args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
  } else {
    // Set limit via channel args.
    grpc_arg arg;
    arg.key = send_limit ? GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
                         : GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH;
    arg.type = GRPC_ARG_INTEGER;
    arg.value.integer = 5;
    grpc_channel_args *args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
    if (send_limit) {
      server_args = args;
    } else {
      client_args = args;
    }
  }

  f = begin_test(config, "test_max_response_message_length", client_args,
                 server_args);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    if (client_args != NULL) grpc_channel_args_destroy(&exec_ctx, client_args);
    if (server_args != NULL) grpc_channel_args_destroy(&exec_ctx, server_args);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  cqv = cq_verifier_create(f.cq);

  c = grpc_channel_create_call(
      f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
      grpc_slice_from_static_string("/service/method"),
      get_host_override_slice("foo.test.google.fr:1234", config),
      gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(c);

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_MESSAGE;
  op->data.recv_message.recv_message = &recv_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);

  error =
      grpc_server_request_call(f.server, &s, &call_details,
                               &request_metadata_recv, f.cq, f.cq, tag(101));
  GPR_ASSERT(GRPC_CALL_OK == error);
  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
  cq_verify(cqv);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
  op->data.recv_close_on_server.cancelled = &was_cancelled;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message.send_message = response_payload;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
  op->data.send_status_from_server.trailing_metadata_count = 0;
  op->data.send_status_from_server.status = GRPC_STATUS_OK;
  grpc_slice status_details = grpc_slice_from_static_string("xyz");
  op->data.send_status_from_server.status_details = &status_details;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);

  CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);

  GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/service/method"));
  GPR_ASSERT(0 ==
             grpc_slice_str_cmp(call_details.host, "foo.test.google.fr:1234"));

  GPR_ASSERT(status == GRPC_STATUS_INVALID_ARGUMENT);
  GPR_ASSERT(
      grpc_slice_str_cmp(
          details, send_limit
                       ? "Sent message larger than max (11 vs. 5)"
                       : "Received message larger than max (11 vs. 5)") == 0);

  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);
  grpc_byte_buffer_destroy(response_payload);
  grpc_byte_buffer_destroy(recv_payload);

  grpc_call_destroy(c);
  if (s != NULL) grpc_call_destroy(s);

  cq_verifier_destroy(cqv);

  end_test(&f);
  config.tear_down_data(&f);
}
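For reference, the channel-args variant in isolation: a minimal sketch that caps received messages at 5 bytes on an insecure client channel (grpc_insecure_channel_create signature assumed from the same API era):

static grpc_channel *make_limited_channel(void) {
  grpc_arg arg;
  arg.type = GRPC_ARG_INTEGER;
  arg.key = GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH;
  arg.value.integer = 5; /* cap received messages at 5 bytes */
  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create("localhost:50051", &args, NULL);
}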
Example #9
void grpc_end2end_tests(int argc, char **argv,
                        grpc_end2end_test_config config) {
  int i;

  if (argc <= 1) {
    bad_hostname(config);
    binary_metadata(config);
    cancel_after_accept(config);
    cancel_after_client_done(config);
    cancel_after_invoke(config);
    cancel_before_invoke(config);
    cancel_in_a_vacuum(config);
    cancel_with_status(config);
    compressed_payload(config);
    connectivity(config);
    default_host(config);
    disappearing_server(config);
    empty_batch(config);
    graceful_server_shutdown(config);
    high_initial_seqno(config);
    hpack_size(config);
    invoke_large_request(config);
    large_metadata(config);
    max_concurrent_streams(config);
    max_message_length(config);
    negative_deadline(config);
    no_op(config);
    payload(config);
    ping(config);
    ping_pong_streaming(config);
    registered_call(config);
    request_with_flags(config);
    request_with_payload(config);
    server_finishes_request(config);
    shutdown_finishes_calls(config);
    shutdown_finishes_tags(config);
    simple_delayed_request(config);
    simple_metadata(config);
    simple_request(config);
    trailing_metadata(config);
    return;
  }

  for (i = 1; i < argc; i++) {
    if (0 == strcmp("bad_hostname", argv[i])) {
      bad_hostname(config);
      continue;
    }
    if (0 == strcmp("binary_metadata", argv[i])) {
      binary_metadata(config);
      continue;
    }
    if (0 == strcmp("cancel_after_accept", argv[i])) {
      cancel_after_accept(config);
      continue;
    }
    if (0 == strcmp("cancel_after_client_done", argv[i])) {
      cancel_after_client_done(config);
      continue;
    }
    if (0 == strcmp("cancel_after_invoke", argv[i])) {
      cancel_after_invoke(config);
      continue;
    }
    if (0 == strcmp("cancel_before_invoke", argv[i])) {
      cancel_before_invoke(config);
      continue;
    }
    if (0 == strcmp("cancel_in_a_vacuum", argv[i])) {
      cancel_in_a_vacuum(config);
      continue;
    }
    if (0 == strcmp("cancel_with_status", argv[i])) {
      cancel_with_status(config);
      continue;
    }
    if (0 == strcmp("compressed_payload", argv[i])) {
      compressed_payload(config);
      continue;
    }
    if (0 == strcmp("connectivity", argv[i])) {
      connectivity(config);
      continue;
    }
    if (0 == strcmp("default_host", argv[i])) {
      default_host(config);
      continue;
    }
    if (0 == strcmp("disappearing_server", argv[i])) {
      disappearing_server(config);
      continue;
    }
    if (0 == strcmp("empty_batch", argv[i])) {
      empty_batch(config);
      continue;
    }
    if (0 == strcmp("graceful_server_shutdown", argv[i])) {
      graceful_server_shutdown(config);
      continue;
    }
    if (0 == strcmp("high_initial_seqno", argv[i])) {
      high_initial_seqno(config);
      continue;
    }
    if (0 == strcmp("hpack_size", argv[i])) {
      hpack_size(config);
      continue;
    }
    if (0 == strcmp("invoke_large_request", argv[i])) {
      invoke_large_request(config);
      continue;
    }
    if (0 == strcmp("large_metadata", argv[i])) {
      large_metadata(config);
      continue;
    }
    if (0 == strcmp("max_concurrent_streams", argv[i])) {
      max_concurrent_streams(config);
      continue;
    }
    if (0 == strcmp("max_message_length", argv[i])) {
      max_message_length(config);
      continue;
    }
    if (0 == strcmp("negative_deadline", argv[i])) {
      negative_deadline(config);
      continue;
    }
    if (0 == strcmp("no_op", argv[i])) {
      no_op(config);
      continue;
    }
    if (0 == strcmp("payload", argv[i])) {
      payload(config);
      continue;
    }
    if (0 == strcmp("ping", argv[i])) {
      ping(config);
      continue;
    }
    if (0 == strcmp("ping_pong_streaming", argv[i])) {
      ping_pong_streaming(config);
      continue;
    }
    if (0 == strcmp("registered_call", argv[i])) {
      registered_call(config);
      continue;
    }
    if (0 == strcmp("request_with_flags", argv[i])) {
      request_with_flags(config);
      continue;
    }
    if (0 == strcmp("request_with_payload", argv[i])) {
      request_with_payload(config);
      continue;
    }
    if (0 == strcmp("server_finishes_request", argv[i])) {
      server_finishes_request(config);
      continue;
    }
    if (0 == strcmp("shutdown_finishes_calls", argv[i])) {
      shutdown_finishes_calls(config);
      continue;
    }
    if (0 == strcmp("shutdown_finishes_tags", argv[i])) {
      shutdown_finishes_tags(config);
      continue;
    }
    if (0 == strcmp("simple_delayed_request", argv[i])) {
      simple_delayed_request(config);
      continue;
    }
    if (0 == strcmp("simple_metadata", argv[i])) {
      simple_metadata(config);
      continue;
    }
    if (0 == strcmp("simple_request", argv[i])) {
      simple_request(config);
      continue;
    }
    if (0 == strcmp("trailing_metadata", argv[i])) {
      trailing_metadata(config);
      continue;
    }
    gpr_log(GPR_DEBUG, "not a test: '%s'", argv[i]);
    abort();
  }
}
Example #10
/* Create a secure client channel:
   Asynchronously: - resolve target
                   - connect to it (trying alternatives as presented)
                   - perform handshakes */
grpc_channel *grpc_secure_channel_create(grpc_credentials *creds,
                                         const char *target,
                                         const grpc_channel_args *args,
                                         void *reserved) {
  grpc_channel *channel;
  grpc_arg connector_arg;
  grpc_channel_args *args_copy;
  grpc_channel_args *new_args_from_connector;
  grpc_channel_security_connector *connector;
  grpc_mdctx *mdctx;
  grpc_resolver *resolver;
  subchannel_factory *f;
#define MAX_FILTERS 3
  const grpc_channel_filter *filters[MAX_FILTERS];
  size_t n = 0;

  GPR_ASSERT(reserved == NULL);
  if (grpc_find_security_connector_in_args(args) != NULL) {
    gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
    return grpc_lame_client_channel_create(
        target, GRPC_STATUS_INVALID_ARGUMENT,
        "Security connector exists in channel args.");
  }

  if (grpc_credentials_create_security_connector(
          creds, target, args, NULL, &connector, &new_args_from_connector) !=
      GRPC_SECURITY_OK) {
    return grpc_lame_client_channel_create(
        target, GRPC_STATUS_INVALID_ARGUMENT,
        "Failed to create security connector.");
  }
  mdctx = grpc_mdctx_create();

  connector_arg = grpc_security_connector_to_arg(&connector->base);
  args_copy = grpc_channel_args_copy_and_add(
      new_args_from_connector != NULL ? new_args_from_connector : args,
      &connector_arg, 1);
  if (grpc_channel_args_is_census_enabled(args)) {
    filters[n++] = &grpc_client_census_filter;
  }
  filters[n++] = &grpc_compress_filter;
  filters[n++] = &grpc_client_channel_filter;
  GPR_ASSERT(n <= MAX_FILTERS);

  channel =
      grpc_channel_create_from_filters(target, filters, n, args_copy, mdctx, 1);

  f = gpr_malloc(sizeof(*f));
  f->base.vtable = &subchannel_factory_vtable;
  gpr_ref_init(&f->refs, 1);
  grpc_mdctx_ref(mdctx);
  f->mdctx = mdctx;
  GRPC_SECURITY_CONNECTOR_REF(&connector->base, "subchannel_factory");
  f->security_connector = connector;
  f->merge_args = grpc_channel_args_copy(args_copy);
  f->master = channel;
  GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
  resolver = grpc_resolver_create(target, &f->base);
  if (!resolver) {
    return NULL;
  }

  grpc_client_channel_set_resolver(grpc_channel_get_channel_stack(channel),
                                   resolver);
  GRPC_RESOLVER_UNREF(resolver, "create");
  grpc_subchannel_factory_unref(&f->base);
  GRPC_SECURITY_CONNECTOR_UNREF(&connector->base, "channel_create");

  grpc_channel_args_destroy(args_copy);
  if (new_args_from_connector != NULL) {
    grpc_channel_args_destroy(new_args_from_connector);
  }

  return channel;
}
Example #11
grpc_security_status grpc_ssl_channel_security_connector_create(
    grpc_credentials *request_metadata_creds, const grpc_ssl_config *config,
    const char *target_name, const char *overridden_target_name,
    grpc_channel_security_connector **sc) {
  size_t num_alpn_protocols = grpc_chttp2_num_alpn_versions();
  const unsigned char **alpn_protocol_strings =
      gpr_malloc(sizeof(const char *) * num_alpn_protocols);
  unsigned char *alpn_protocol_string_lengths =
      gpr_malloc(sizeof(unsigned char) * num_alpn_protocols);
  tsi_result result = TSI_OK;
  grpc_ssl_channel_security_connector *c;
  size_t i;
  const unsigned char *pem_root_certs;
  size_t pem_root_certs_size;
  char *port;

  for (i = 0; i < num_alpn_protocols; i++) {
    alpn_protocol_strings[i] =
        (const unsigned char *)grpc_chttp2_get_alpn_version_index(i);
    alpn_protocol_string_lengths[i] =
        (unsigned char)strlen(grpc_chttp2_get_alpn_version_index(i));
  }

  if (config == NULL || target_name == NULL) {
    gpr_log(GPR_ERROR, "An ssl channel needs a config and a target name.");
    goto error;
  }
  if (!check_request_metadata_creds(request_metadata_creds)) {
    goto error;
  }
  if (config->pem_root_certs == NULL) {
    pem_root_certs_size = grpc_get_default_ssl_roots(&pem_root_certs);
    if (pem_root_certs == NULL || pem_root_certs_size == 0) {
      gpr_log(GPR_ERROR, "Could not get default pem root certs.");
      goto error;
    }
  } else {
    pem_root_certs = config->pem_root_certs;
    pem_root_certs_size = config->pem_root_certs_size;
  }

  c = gpr_malloc(sizeof(grpc_ssl_channel_security_connector));
  memset(c, 0, sizeof(grpc_ssl_channel_security_connector));

  gpr_ref_init(&c->base.base.refcount, 1);
  c->base.base.vtable = &ssl_channel_vtable;
  c->base.base.is_client_side = 1;
  c->base.base.url_scheme = GRPC_SSL_URL_SCHEME;
  c->base.request_metadata_creds = grpc_credentials_ref(request_metadata_creds);
  c->base.check_call_host = ssl_channel_check_call_host;
  gpr_split_host_port(target_name, &c->target_name, &port);
  gpr_free(port);
  if (overridden_target_name != NULL) {
    c->overridden_target_name = gpr_strdup(overridden_target_name);
  }
  result = tsi_create_ssl_client_handshaker_factory(
      config->pem_private_key, config->pem_private_key_size,
      config->pem_cert_chain, config->pem_cert_chain_size, pem_root_certs,
      pem_root_certs_size, ssl_cipher_suites(), alpn_protocol_strings,
      alpn_protocol_string_lengths, (uint16_t)num_alpn_protocols,
      &c->handshaker_factory);
  if (result != TSI_OK) {
    gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
            tsi_result_to_string(result));
    ssl_channel_destroy(&c->base.base);
    *sc = NULL;
    goto error;
  }
  *sc = &c->base;
  gpr_free(alpn_protocol_strings);
  gpr_free(alpn_protocol_string_lengths);
  return GRPC_SECURITY_OK;

error:
  gpr_free(alpn_protocol_strings);
  gpr_free(alpn_protocol_string_lengths);
  return GRPC_SECURITY_ERROR;
}
Example #12
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
  server_filter_args *a = user_data;
  grpc_call_element *elem = a->elem;
  call_data *calld = elem->call_data;

  /* Check if it is one of the headers we care about. */
  if (md == GRPC_MDELEM_TE_TRAILERS || md == GRPC_MDELEM_METHOD_POST ||
      md == GRPC_MDELEM_METHOD_PUT || md == GRPC_MDELEM_SCHEME_HTTP ||
      md == GRPC_MDELEM_SCHEME_HTTPS ||
      md == GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC) {
    /* swallow it */
    if (md == GRPC_MDELEM_METHOD_POST) {
      calld->seen_method = 1;
      *calld->recv_idempotent_request = false;
    } else if (md == GRPC_MDELEM_METHOD_PUT) {
      calld->seen_method = 1;
      *calld->recv_idempotent_request = true;
    } else if (md->key == GRPC_MDSTR_SCHEME) {
      calld->seen_scheme = 1;
    } else if (md == GRPC_MDELEM_TE_TRAILERS) {
      calld->seen_te_trailers = 1;
    }
    /* TODO(klempner): Track that we've seen all the headers we should
       require */
    return NULL;
  } else if (md->key == GRPC_MDSTR_CONTENT_TYPE) {
    const char *value_str = grpc_mdstr_as_c_string(md->value);
    if (strncmp(value_str, EXPECTED_CONTENT_TYPE,
                EXPECTED_CONTENT_TYPE_LENGTH) == 0 &&
        (value_str[EXPECTED_CONTENT_TYPE_LENGTH] == '+' ||
         value_str[EXPECTED_CONTENT_TYPE_LENGTH] == ';')) {
      /* Although the C implementation doesn't (currently) generate them,
         any custom +-suffix is explicitly valid. */
      /* TODO(klempner): We should consider preallocating common values such
         as +proto or +json, or at least stashing them if we see them. */
      /* TODO(klempner): Should we be surfacing this to application code? */
    } else {
      /* TODO(klempner): We're currently allowing this, but we shouldn't
         see it without a proxy so log for now. */
      gpr_log(GPR_INFO, "Unexpected content-type %s", value_str);
    }
    return NULL;
  } else if (md->key == GRPC_MDSTR_TE || md->key == GRPC_MDSTR_METHOD ||
             md->key == GRPC_MDSTR_SCHEME) {
    gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
            grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
    /* swallow it and error everything out. */
    /* TODO(klempner): We ought to generate more descriptive error messages
       on the wire here. */
    grpc_call_element_send_cancel(a->exec_ctx, elem);
    return NULL;
  } else if (md->key == GRPC_MDSTR_PATH) {
    if (calld->seen_path) {
      gpr_log(GPR_ERROR, "Received :path twice");
      return NULL;
    }
    calld->seen_path = 1;
    return md;
  } else if (md->key == GRPC_MDSTR_AUTHORITY) {
    calld->seen_authority = 1;
    return md;
  } else if (md->key == GRPC_MDSTR_HOST) {
    /* translate host to :authority since :authority may be
       omitted */
    grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
        GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
    calld->seen_authority = 1;
    return authority;
  } else {
    return md;
  }
}
Example #13
void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator,
                              const char *client_payload,
                              size_t client_payload_length, gpr_uint32 flags) {
  grpc_endpoint_pair sfd;
  thd_args a;
  gpr_thd_id id;
  char *hex;
  grpc_transport *transport;
  grpc_mdctx *mdctx = grpc_mdctx_create();
  gpr_slice slice =
      gpr_slice_from_copied_buffer(client_payload, client_payload_length);
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure done_write_closure;

  hex = gpr_dump(client_payload, client_payload_length,
                 GPR_DUMP_HEX | GPR_DUMP_ASCII);

  /* Add a debug log */
  gpr_log(GPR_INFO, "TEST: %s", hex);

  gpr_free(hex);

  /* Init grpc */
  grpc_init();

  /* Create endpoints */
  sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536);

  /* Create server, completion events */
  a.server = grpc_server_create_from_filters(NULL, 0, NULL);
  a.cq = grpc_completion_queue_create(NULL);
  gpr_event_init(&a.done_thd);
  gpr_event_init(&a.done_write);
  a.validator = validator;
  grpc_server_register_completion_queue(a.server, a.cq, NULL);
  grpc_server_start(a.server);
  transport = grpc_create_chttp2_transport(NULL, sfd.server, mdctx, 0);
  server_setup_transport(&a, transport, mdctx);
  grpc_chttp2_transport_start_reading(transport, NULL, 0);

  /* Bind everything into the same pollset */
  grpc_endpoint_add_to_pollset(sfd.client, grpc_cq_pollset(a.cq));
  grpc_endpoint_add_to_pollset(sfd.server, grpc_cq_pollset(a.cq));

  /* Check a ground truth */
  GPR_ASSERT(grpc_server_has_open_connections(a.server));

  /* Start validator */
  gpr_thd_new(&id, thd_func, &a, NULL);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_add(&outgoing, slice);
  grpc_iomgr_closure_init(&done_write_closure, done_write, &a);

  /* Write data */
  switch (grpc_endpoint_write(sfd.client, &outgoing, &done_write_closure)) {
    case GRPC_ENDPOINT_DONE:
      done_write(&a, 1);
      break;
    case GRPC_ENDPOINT_PENDING:
      break;
    case GRPC_ENDPOINT_ERROR:
      done_write(&a, 0);
      break;
  }

  /* Await completion */
  GPR_ASSERT(
      gpr_event_wait(&a.done_write, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)));

  if (flags & GRPC_BAD_CLIENT_DISCONNECT) {
    grpc_endpoint_shutdown(sfd.client);
    grpc_endpoint_destroy(sfd.client);
    sfd.client = NULL;
  }

  GPR_ASSERT(gpr_event_wait(&a.done_thd, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)));

  /* Shutdown */
  if (sfd.client) {
    grpc_endpoint_shutdown(sfd.client);
    grpc_endpoint_destroy(sfd.client);
  }
  grpc_server_shutdown_and_notify(a.server, a.cq, NULL);
  GPR_ASSERT(grpc_completion_queue_pluck(
                 a.cq, NULL, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1), NULL)
                 .type == GRPC_OP_COMPLETE);
  grpc_server_destroy(a.server);
  grpc_completion_queue_destroy(a.cq);
  gpr_slice_buffer_destroy(&outgoing);

  grpc_shutdown();
}
Example #14
static void simple_request_body(grpc_end2end_test_fixture f, size_t num_ops) {
  grpc_call *c;
  gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_REALTIME);
  cq_verifier *cqv = cq_verifier_create(f.cq);
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_status_code status;
  grpc_call_error error;
  char *details = NULL;
  size_t details_capacity = 0;

  gpr_log(GPR_DEBUG, "test with %" PRIuPTR " ops", num_ops);

  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
                               "/foo", "foo.test.google.fr:1234", deadline,
                               NULL);
  GPR_ASSERT(c);

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  GPR_ASSERT(num_ops <= (size_t)(op - ops));
  error = grpc_call_start_batch(c, ops, num_ops, tag(1), NULL);
  GPR_ASSERT(GRPC_CALL_OK == error);

  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);

  GPR_ASSERT(status == GRPC_STATUS_DEADLINE_EXCEEDED);

  gpr_free(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);

  grpc_call_destroy(c);

  cq_verifier_destroy(cqv);
}
Example #15
/* Create a secure client channel:
   Asynchronously: - resolve target
                   - connect to it (trying alternatives as presented)
                   - perform handshakes */
grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
                                         const char *target,
                                         const grpc_channel_args *args,
                                         void *reserved) {
  grpc_arg connector_arg;
  grpc_channel_args *args_copy;
  grpc_channel_args *new_args_from_connector;
  grpc_channel_security_connector *security_connector;
  client_channel_factory *f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_API_TRACE(
      "grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
      "reserved=%p)",
      4, (creds, target, args, reserved));
  GPR_ASSERT(reserved == NULL);

  if (grpc_find_security_connector_in_args(args) != NULL) {
    gpr_log(GPR_ERROR, "Cannot set security context in channel args.");
    grpc_exec_ctx_finish(&exec_ctx);
    return grpc_lame_client_channel_create(
        target, GRPC_STATUS_INTERNAL,
        "Security connector exists in channel args.");
  }

  if (grpc_channel_credentials_create_security_connector(
          creds, target, args, &security_connector, &new_args_from_connector) !=
      GRPC_SECURITY_OK) {
    grpc_exec_ctx_finish(&exec_ctx);
    return grpc_lame_client_channel_create(
        target, GRPC_STATUS_INTERNAL, "Failed to create security connector.");
  }

  connector_arg = grpc_security_connector_to_arg(&security_connector->base);
  args_copy = grpc_channel_args_copy_and_add(
      new_args_from_connector != NULL ? new_args_from_connector : args,
      &connector_arg, 1);

  f = gpr_malloc(sizeof(*f));
  memset(f, 0, sizeof(*f));
  f->base.vtable = &client_channel_factory_vtable;
  gpr_ref_init(&f->refs, 1);

  f->merge_args = grpc_channel_args_copy(args_copy);
  grpc_channel_args_destroy(args_copy);
  if (new_args_from_connector != NULL) {
    grpc_channel_args_destroy(new_args_from_connector);
  }

  GRPC_SECURITY_CONNECTOR_REF(&security_connector->base,
                              "grpc_secure_channel_create");
  f->security_connector = security_connector;

  grpc_channel *channel = client_channel_factory_create_channel(
      &exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, NULL);
  if (channel != NULL) {
    f->master = channel;
    GRPC_CHANNEL_INTERNAL_REF(f->master, "grpc_secure_channel_create");
  }

  grpc_client_channel_factory_unref(&exec_ctx, &f->base);
  grpc_exec_ctx_finish(&exec_ctx);

  return channel; /* may be NULL */
}
Example #16
static void test2(void) {
  gpr_log(GPR_INFO, "test2");

  grpc_timer_heap pq;

  static const size_t elems_size = 1000;
  elem_struct *elems = gpr_malloc(elems_size * sizeof(elem_struct));
  size_t num_inserted = 0;

  grpc_timer_heap_init(&pq);
  memset(elems, 0, elems_size * sizeof(elem_struct));

  for (size_t round = 0; round < 10000; round++) {
    int r = rand() % 1000;
    if (r <= 550) {
      /* 55% of the time we try to add something */
      elem_struct *el = search_elems(elems, elems_size, false);
      if (el != NULL) {
        el->elem.deadline = random_deadline();
        grpc_timer_heap_add(&pq, &el->elem);
        el->inserted = true;
        num_inserted++;
        check_valid(&pq);
      }
    } else if (r <= 650) {
      /* 10% of the time we try to remove something */
      elem_struct *el = search_elems(elems, elems_size, true);
      if (el != NULL) {
        grpc_timer_heap_remove(&pq, &el->elem);
        el->inserted = false;
        num_inserted--;
        check_valid(&pq);
      }
    } else {
      /* the remaining times we pop */
      if (num_inserted > 0) {
        grpc_timer *top = grpc_timer_heap_top(&pq);
        grpc_timer_heap_pop(&pq);
        for (size_t i = 0; i < elems_size; i++) {
          if (top == &elems[i].elem) {
            GPR_ASSERT(elems[i].inserted);
            elems[i].inserted = false;
          }
        }
        num_inserted--;
        check_valid(&pq);
      }
    }

    if (num_inserted) {
      gpr_timespec *min_deadline = NULL;
      for (size_t i = 0; i < elems_size; i++) {
        if (elems[i].inserted) {
          if (min_deadline == NULL) {
            min_deadline = &elems[i].elem.deadline;
          } else {
            if (gpr_time_cmp(elems[i].elem.deadline, *min_deadline) < 0) {
              min_deadline = &elems[i].elem.deadline;
            }
          }
        }
      }
      GPR_ASSERT(
          0 == gpr_time_cmp(grpc_timer_heap_top(&pq)->deadline, *min_deadline));
    }
  }

  grpc_timer_heap_destroy(&pq);
  gpr_free(elems);
}
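check_valid and search_elems are not shown. A plausible sketch of check_valid, assuming grpc_timer_heap keeps a binary min-heap in a timers array with timer_count entries (field names assumed):

static void check_valid(grpc_timer_heap *pq) {
  size_t i;
  for (i = 0; i < pq->timer_count; ++i) {
    size_t left_child = 1u + 2u * i;
    size_t right_child = left_child + 1u;
    /* each parent deadline must be <= both children's deadlines */
    if (left_child < pq->timer_count) {
      GPR_ASSERT(gpr_time_cmp(pq->timers[i]->deadline,
                              pq->timers[left_child]->deadline) <= 0);
    }
    if (right_child < pq->timer_count) {
      GPR_ASSERT(gpr_time_cmp(pq->timers[i]->deadline,
                              pq->timers[right_child]->deadline) <= 0);
    }
  }
}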
Example #17
/* Initiates a write. */
static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
                                         gpr_slice_buffer *slices,
                                         grpc_iomgr_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->socket->write_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_ERROR;
  }
  TCP_REF(tcp, "write");

  tcp->socket->write_info.outstanding = 1;
  tcp->write_cb = cb;
  tcp->write_slices = slices;

  if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices->count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices->count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices->length);
    } else {
      if (info->wsa_error != WSAECONNRESET) { /* write path: check write_info */
        char *utf8_message = gpr_format_message(info->wsa_error);
        gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
        gpr_free(utf8_message);
      }
    }
    if (allocated) gpr_free(allocated);
    tcp->socket->write_info.outstanding = 0;
    TCP_UNREF(tcp, "write");
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices->count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      tcp->socket->write_info.outstanding = 0;
      TCP_UNREF(tcp, "write");
      return GRPC_ENDPOINT_ERROR;
    }
  }

  /* Everything is now set up, so we can ask for the IOCP notification. It may
     trigger the callback immediately, but that is fine. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_PENDING;
}
Example #18
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status error) {
  unsigned i;
  gpr_uint8 keep_looping = 0;
  size_t input_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < nslices; i++) {
    gpr_slice encrypted = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        &ep->input_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
    call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
    return;
  }
  /* The upper level will unref the slices. */
  input_buffer_count = ep->input_buffer.count;
  ep->input_buffer.count = 0;
  call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
}
Example #19
static void multipoll_with_epoll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int ep_rv;
  int poll_rv;
  pollset_hdr *h = pollset->data.ptr;
  int timeout_ms;
  struct pollfd pfds[2];

  /* If you want to ignore epoll's ability to sanely handle parallel pollers,
   * for a more apples-to-apples performance comparison with poll, add a
   * if (pollset->counter != 0) { return 0; }
   * here.
   */

  gpr_mu_unlock(&pollset->mu);

  timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);

  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = h->epoll_fd;
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;

  poll_rv = grpc_poll_function(pfds, 2, timeout_ms);

  if (poll_rv < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (poll_rv == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (pfds[1].revents) {
      do {
        ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
        if (ep_rv < 0) {
          if (errno != EINTR) {
            gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
          }
        } else {
          int i;
          for (i = 0; i < ep_rv; ++i) {
            grpc_fd *fd = ep_ev[i].data.ptr;
            /* TODO(klempner): We might want to consider making err and pri
             * separate events */
            int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
            int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
            int write = ep_ev[i].events & EPOLLOUT;
            if (read || cancel) {
              grpc_fd_become_readable(fd, allow_synchronous_callback);
            }
            if (write || cancel) {
              grpc_fd_become_writable(fd, allow_synchronous_callback);
            }
          }
        }
      } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
    }
  }

  gpr_mu_lock(&pollset->mu);
}
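The registration side is not part of this excerpt. A sketch of how an fd could be added to h->epoll_fd so that ep_ev[i].data.ptr later yields the grpc_fd, assuming edge-triggered read/write interest (hypothetical helper; pollset_hdr and grpc_fd as in the excerpt):

static void add_fd_to_epoll(pollset_hdr *h, grpc_fd *fd, int fd_number) {
  struct epoll_event ev;
  memset(&ev, 0, sizeof(ev));
  ev.events = EPOLLIN | EPOLLOUT | EPOLLET; /* edge-triggered in/out */
  ev.data.ptr = fd; /* recovered as ep_ev[i].data.ptr in the loop above */
  if (epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd_number, &ev) != 0 &&
      errno != EEXIST) {
    gpr_log(GPR_ERROR, "epoll_ctl add failed: %s", strerror(errno));
  }
}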
Example #20
static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  unsigned i;
  size_t output_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
  grpc_endpoint_write_status status;
  GPR_ASSERT(ep->output_buffer.count == 0);

  if (grpc_trace_secure_endpoint) {
    for (i = 0; i < nslices; i++) {
      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice plain = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    size_t still_pending_size;
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    return GRPC_ENDPOINT_WRITE_ERROR;
  }

  /* clear output_buffer and let the lower level handle its slices. */
  output_buffer_count = ep->output_buffer.count;
  ep->output_buffer.count = 0;
  ep->write_cb = cb;
  ep->write_user_data = user_data;
  /* Need to keep the endpoint alive across a transport */
  secure_endpoint_ref(ep);
  status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
                               output_buffer_count, on_write, ep);
  if (status != GRPC_ENDPOINT_WRITE_PENDING) {
    secure_endpoint_unref(ep);
  }
  return status;
}
Example #21
void grpc_socket_notify_on_read(grpc_winsocket *socket,
                                void (*cb)(void *, int), void *opaque) {
  gpr_log(GPR_DEBUG, "grpc_socket_notify_on_read");
  socket_notify_on_iocp(socket, cb, opaque, &socket->read_info);
}
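The write-side counterpart is symmetric; a sketch, assuming the same socket_notify_on_iocp helper dispatches on the per-direction callback_info:

void grpc_socket_notify_on_write(grpc_winsocket *socket,
                                 void (*cb)(void *, int), void *opaque) {
  gpr_log(GPR_DEBUG, "grpc_socket_notify_on_write");
  socket_notify_on_iocp(socket, cb, opaque, &socket->write_info);
}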
Example #22
static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
  server_filter_args *a = user_data;
  grpc_call_element *elem = a->elem;
  channel_data *channeld = elem->channel_data;
  call_data *calld = elem->call_data;

  /* Check if it is one of the headers we care about. */
  if (md == channeld->te_trailers || md == channeld->method_post ||
      md == channeld->http_scheme || md == channeld->https_scheme ||
      md == channeld->grpc_scheme || md == channeld->content_type) {
    /* swallow it */
    if (md == channeld->method_post) {
      calld->seen_post = 1;
    } else if (md->key == channeld->http_scheme->key) {
      calld->seen_scheme = 1;
    } else if (md == channeld->te_trailers) {
      calld->seen_te_trailers = 1;
    }
    /* TODO(klempner): Track that we've seen all the headers we should
       require */
    return NULL;
  } else if (md->key == channeld->content_type->key) {
    if (strncmp(grpc_mdstr_as_c_string(md->value), "application/grpc+", 17) ==
        0) {
      /* Although the C implementation doesn't (currently) generate them,
         any custom +-suffix is explicitly valid. */
      /* TODO(klempner): We should consider preallocating common values such
         as +proto or +json, or at least stashing them if we see them. */
      /* TODO(klempner): Should we be surfacing this to application code? */
    } else {
      /* TODO(klempner): We're currently allowing this, but we shouldn't
         see it without a proxy so log for now. */
      gpr_log(GPR_INFO, "Unexpected content-type %s",
              channeld->content_type->key);
    }
    return NULL;
  } else if (md->key == channeld->te_trailers->key ||
             md->key == channeld->method_post->key ||
             md->key == channeld->http_scheme->key) {
    gpr_log(GPR_ERROR, "Invalid %s: header: '%s'",
            grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value));
    /* swallow it and error everything out. */
    /* TODO(klempner): We ought to generate more descriptive error messages
       on the wire here. */
    grpc_call_element_send_cancel(a->exec_ctx, elem);
    return NULL;
  } else if (md->key == channeld->path_key) {
    if (calld->seen_path) {
      gpr_log(GPR_ERROR, "Received :path twice");
      return NULL;
    }
    calld->seen_path = 1;
    return md;
  } else if (md->key == channeld->authority_key) {
    calld->seen_authority = 1;
    return md;
  } else if (md->key == channeld->host_key) {
    /* translate host to :authority since :authority may be
       omitted */
    grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
        channeld->mdctx, GRPC_MDSTR_REF(channeld->authority_key),
        GRPC_MDSTR_REF(md->value));
    GRPC_MDELEM_UNREF(md);
    calld->seen_authority = 1;
    return authority;
  } else {
    return md;
  }
}
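The TODO about tracking required headers hints at the missing enforcement step. A hypothetical helper (check_required_headers is not in the source) that a later hook could call against the same call_data flags the filter sets:

/* Hypothetical: verify every header the filter tracked was actually seen
   before letting the call proceed. seen_authority covers both :authority
   and the host header translated above. */
static int check_required_headers(call_data *calld) {
  return calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
         calld->seen_path && calld->seen_authority;
}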
Example #23
0
int main(int argc, char **argv) {
  grpc_event *ev;
  call_state *s;
  char *addr_buf = NULL;
  gpr_cmdline *cl;
  int shutdown_started = 0;
  int shutdown_finished = 0;

  int secure = 0;
  char *addr = NULL;

  char *fake_argv[1];

#define MAX_ARGS 4
  grpc_arg arge[MAX_ARGS];
  grpc_arg *e;
  grpc_channel_args args = {0, NULL};

  grpc_http_server_page home_page = {"/", "text/html",
                                     "<head>\n"
                                     "<title>Echo Server</title>\n"
                                     "</head>\n"
                                     "<body>\n"
                                     "Welcome to the world of the future!\n"
                                     "</body>\n"};

  GPR_ASSERT(argc >= 1);
  fake_argv[0] = argv[0];
  grpc_test_init(1, fake_argv);

  grpc_init();
  srand(clock());
  memset(arge, 0, sizeof(arge));
  args.args = arge;

  cl = gpr_cmdline_create("echo server");
  gpr_cmdline_add_string(cl, "bind", "Bind host:port", &addr);
  gpr_cmdline_add_flag(cl, "secure", "Run with security?", &secure);
  gpr_cmdline_parse(cl, argc, argv);
  gpr_cmdline_destroy(cl);

  e = &arge[args.num_args++];
  e->type = GRPC_ARG_POINTER;
  e->key = GRPC_ARG_SERVE_OVER_HTTP;
  e->value.pointer.p = &home_page;

  if (addr == NULL) {
    gpr_join_host_port(&addr_buf, "::", grpc_pick_unused_port_or_die());
    addr = addr_buf;
  }
  gpr_log(GPR_INFO, "creating server on: %s", addr);

  cq = grpc_completion_queue_create();
  if (secure) {
    grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
                                                    test_server1_cert};
    grpc_server_credentials *ssl_creds =
        grpc_ssl_server_credentials_create(NULL, &pem_key_cert_pair, 1);
    server = grpc_server_create(cq, &args);
    GPR_ASSERT(grpc_server_add_secure_http2_port(server, addr, ssl_creds));
    grpc_server_credentials_release(ssl_creds);
  } else {
    server = grpc_server_create(cq, &args);
    GPR_ASSERT(grpc_server_add_http2_port(server, addr));
  }
  grpc_server_start(server);

  gpr_free(addr_buf);
  addr = addr_buf = NULL;

  request_call();

  signal(SIGINT, sigint_handler);
  while (!shutdown_finished) {
    if (got_sigint && !shutdown_started) {
      gpr_log(GPR_INFO, "Shutting down due to SIGINT");
      grpc_server_shutdown(server);
      grpc_completion_queue_shutdown(cq);
      shutdown_started = 1;
    }
    ev = grpc_completion_queue_next(
        cq, gpr_time_add(gpr_now(), gpr_time_from_seconds(1)));
    if (!ev) continue;
    s = ev->tag;
    switch (ev->type) {
      case GRPC_SERVER_RPC_NEW:
        if (ev->call != NULL) {
          /* initial ops are already started in request_call */
          grpc_call_server_accept_old(ev->call, cq, s);
          grpc_call_server_end_initial_metadata_old(ev->call,
                                                    GRPC_WRITE_BUFFER_HINT);
          GPR_ASSERT(grpc_call_start_read_old(ev->call, s) == GRPC_CALL_OK);
          request_call();
        } else {
          GPR_ASSERT(shutdown_started);
          gpr_free(s);
        }
        break;
      case GRPC_WRITE_ACCEPTED:
        GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK);
        GPR_ASSERT(grpc_call_start_read_old(ev->call, s) == GRPC_CALL_OK);
        break;
      case GRPC_READ:
        if (ev->data.read) {
          assert_read_ok(ev->tag, ev->data.read);
          GPR_ASSERT(grpc_call_start_write_old(ev->call, ev->data.read, s,
                                               GRPC_WRITE_BUFFER_HINT) ==
                     GRPC_CALL_OK);
        } else {
          GPR_ASSERT(grpc_call_start_write_status_old(ev->call, GRPC_STATUS_OK,
                                                      NULL, s) == GRPC_CALL_OK);
        }
        break;
      case GRPC_FINISH_ACCEPTED:
      case GRPC_FINISHED:
        if (gpr_unref(&s->pending_ops)) {
          grpc_call_destroy(ev->call);
          gpr_free(s);
        }
        break;
      case GRPC_QUEUE_SHUTDOWN:
        GPR_ASSERT(shutdown_started);
        shutdown_finished = 1;
        break;
      default:
        GPR_ASSERT(0);
    }
    grpc_event_finish(ev);
  }

  grpc_server_destroy(server);
  grpc_completion_queue_destroy(cq);
  grpc_shutdown();

  return 0;
}
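main() leans on a few globals and helpers defined elsewhere in this test. A minimal sketch of what they plausibly look like, assuming the old request-call API (grpc_server_request_call_old) that matches the GRPC_SERVER_RPC_NEW event handling above; names and signatures are assumptions, not the verbatim source:

static grpc_completion_queue *cq;
static grpc_server *server;
static int got_sigint = 0;

static void sigint_handler(int x) { got_sigint = 1; }

/* Each accepted call carries a refcounted call_state; two refs because both
   GRPC_FINISH_ACCEPTED and GRPC_FINISHED must fire before cleanup. */
static void request_call(void) {
  call_state *s = gpr_malloc(sizeof(call_state));
  gpr_ref_init(&s->pending_ops, 2);
  grpc_server_request_call_old(server, s);
}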
Example #24
0
static void on_initial_header(grpc_exec_ctx *exec_ctx, void *tp,
                              grpc_mdelem *md) {
  grpc_chttp2_transport *t = tp;
  grpc_chttp2_stream *s = t->incoming_stream;

  GPR_TIMER_BEGIN("on_initial_header", 0);

  GPR_ASSERT(s != NULL);

  GRPC_CHTTP2_IF_TRACING(gpr_log(
      GPR_INFO, "HTTP:%d:HDR:%s: %s: %s", s->id, t->is_client ? "CLI" : "SVR",
      grpc_mdstr_as_c_string(md->key), grpc_mdstr_as_c_string(md->value)));

  if (md->key == GRPC_MDSTR_GRPC_STATUS && md != GRPC_MDELEM_GRPC_STATUS_0) {
    /* TODO(ctiller): check for a status like " 0" */
    s->seen_error = true;
  }

  if (md->key == GRPC_MDSTR_GRPC_TIMEOUT) {
    gpr_timespec *cached_timeout = grpc_mdelem_get_user_data(md, free_timeout);
    if (!cached_timeout) {
      /* not already parsed: parse it now, and store the result away */
      cached_timeout = gpr_malloc(sizeof(gpr_timespec));
      if (!grpc_http2_decode_timeout(grpc_mdstr_as_c_string(md->value),
                                     cached_timeout)) {
        gpr_log(GPR_ERROR, "Ignoring bad timeout value '%s'",
                grpc_mdstr_as_c_string(md->value));
        *cached_timeout = gpr_inf_future(GPR_TIMESPAN);
      }
      cached_timeout =
          grpc_mdelem_set_user_data(md, free_timeout, cached_timeout);
    }
    grpc_chttp2_incoming_metadata_buffer_set_deadline(
        &s->metadata_buffer[0],
        gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), *cached_timeout));
    GRPC_MDELEM_UNREF(md);
  } else {
    const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
    const size_t metadata_size_limit =
        t->settings[GRPC_ACKED_SETTINGS]
                   [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
    if (new_size > metadata_size_limit) {
      gpr_log(GPR_DEBUG,
              "received initial metadata size exceeds limit (%" PRIuPTR
              " vs. %" PRIuPTR ")",
              new_size, metadata_size_limit);
      grpc_chttp2_cancel_stream(
          exec_ctx, t, s,
          grpc_error_set_int(
              GRPC_ERROR_CREATE("received initial metadata size exceeds limit"),
              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
      grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
      s->seen_error = true;
      GRPC_MDELEM_UNREF(md);
    } else {
      grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
    }
  }

  GPR_TIMER_END("on_initial_header", 0);
}
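The free_timeout destructor passed to grpc_mdelem_get_user_data/grpc_mdelem_set_user_data above is trivially small; a plausible definition — it only has to release the heap-allocated gpr_timespec cached on the mdelem:

static void free_timeout(void *p) { gpr_free(p); }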
Example #25
0
static void dump_objects(const char *kind) {
  grpc_iomgr_object *obj;
  for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
    gpr_log(GPR_DEBUG, "%s OBJECT: %s %p", kind, obj->name, obj);
  }
}
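For context, the list this walks is an intrusive ring rooted at g_root_object. A sketch of the node type, assuming the usual doubly linked layout from the iomgr internals:

typedef struct grpc_iomgr_object {
  char *name;                     /* printed by dump_objects() above */
  struct grpc_iomgr_object *next; /* ring: g_root_object links to itself */
  struct grpc_iomgr_object *prev; /* when no objects are registered */
} grpc_iomgr_object;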
Example #26
0
static grpc_error *init_header_frame_parser(grpc_exec_ctx *exec_ctx,
                                            grpc_chttp2_transport *t,
                                            int is_continuation) {
  uint8_t is_eoh =
      (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
  grpc_chttp2_stream *s;

  /* TODO(ctiller): when to increment header_frames_received? */

  if (is_eoh) {
    t->expect_continuation_stream_id = 0;
  } else {
    t->expect_continuation_stream_id = t->incoming_stream_id;
  }

  if (!is_continuation) {
    t->header_eof =
        (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_STREAM) != 0;
  }

  /* could be a new grpc_chttp2_stream or an existing grpc_chttp2_stream */
  s = grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
  if (s == NULL) {
    if (is_continuation) {
      GRPC_CHTTP2_IF_TRACING(
          gpr_log(GPR_ERROR,
                  "grpc_chttp2_stream disbanded before CONTINUATION received"));
      return init_skip_frame_parser(exec_ctx, t, 1);
    }
    if (t->is_client) {
      if ((t->incoming_stream_id & 1) &&
          t->incoming_stream_id < t->next_stream_id) {
        /* this is an old (probably cancelled) grpc_chttp2_stream */
      } else {
        GRPC_CHTTP2_IF_TRACING(gpr_log(
            GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
      }
      return init_skip_frame_parser(exec_ctx, t, 1);
    } else if (t->last_new_stream_id >= t->incoming_stream_id) {
      GRPC_CHTTP2_IF_TRACING(gpr_log(
          GPR_ERROR,
          "ignoring out of order new grpc_chttp2_stream request on server; "
          "last grpc_chttp2_stream "
          "id=%d, new grpc_chttp2_stream id=%d",
          t->last_new_stream_id, t->incoming_stream_id));
      return init_skip_frame_parser(exec_ctx, t, 1);
    } else if ((t->incoming_stream_id & 1) == 0) {
      GRPC_CHTTP2_IF_TRACING(gpr_log(
          GPR_ERROR,
          "ignoring grpc_chttp2_stream with non-client generated index %d",
          t->incoming_stream_id));
      return init_skip_frame_parser(exec_ctx, t, 1);
    }
    t->last_new_stream_id = t->incoming_stream_id;
    s = t->incoming_stream =
        grpc_chttp2_parsing_accept_stream(exec_ctx, t, t->incoming_stream_id);
    if (s == NULL) {
      GRPC_CHTTP2_IF_TRACING(
          gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
      return init_skip_frame_parser(exec_ctx, t, 1);
    }
  } else {
    t->incoming_stream = s;
  }
  GPR_ASSERT(s != NULL);
  s->stats.incoming.framing_bytes += 9;
  if (s->read_closed) {
    GRPC_CHTTP2_IF_TRACING(gpr_log(
        GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
    t->incoming_stream = NULL;
    return init_skip_frame_parser(exec_ctx, t, 1);
  }
  t->parser = grpc_chttp2_header_parser_parse;
  t->parser_data = &t->hpack_parser;
  switch (s->header_frames_received) {
    case 0:
      t->hpack_parser.on_header = on_initial_header;
      break;
    case 1:
      t->hpack_parser.on_header = on_trailing_header;
      break;
    case 2:
      gpr_log(GPR_ERROR, "too many header frames received");
      return init_skip_frame_parser(exec_ctx, t, 1);
  }
  t->hpack_parser.on_header_user_data = t;
  t->hpack_parser.is_boundary = is_eoh;
  t->hpack_parser.is_eof = (uint8_t)(is_eoh ? t->header_eof : 0);
  if (!is_continuation &&
      (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY)) {
    grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
  }
  return GRPC_ERROR_NONE;
}
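One detail worth calling out: the (t->incoming_stream_id & 1) tests encode HTTP/2's stream-identifier parity rule. An illustrative helper, not in the source:

#include <stdint.h>

/* Illustrative only: HTTP/2 (RFC 7540 §5.1.1) assigns odd stream ids to
   client-initiated streams and even ids to server-initiated ones, which is
   why the server above rejects even-numbered incoming stream ids. */
static int stream_id_is_client_initiated(uint32_t id) {
  return (id & 1u) != 0;
}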
Example #27
0
void test_times_out(void) {
  struct sockaddr_in addr;
  socklen_t addr_len = sizeof(addr);
  int svr_fd;
#define NUM_CLIENT_CONNECTS 100
  int client_fd[NUM_CLIENT_CONNECTS];
  int i;
  int r;
  int connections_complete_before;
  gpr_timespec connect_deadline;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_times_out");

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;

  /* create a dummy server */
  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
  GPR_ASSERT(svr_fd >= 0);
  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
  GPR_ASSERT(0 == listen(svr_fd, 1));
  /* Get its address */
  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);

  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
    grpc_set_socket_nonblocking(client_fd[i], 1);
    do {
      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
    } while (r == -1 && errno == EINTR);
    GPR_ASSERT(r < 0);
    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
  }

  /* connect to dummy server address */

  connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  grpc_closure_init(&done, must_fail, NULL);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
                          (struct sockaddr *)&addr, addr_len, connect_deadline);

  /* Make sure the event doesn't trigger early */
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    gpr_timespec now = gpr_now(connect_deadline.clock_type);
    gpr_timespec continue_verifying_time =
        gpr_time_from_seconds(5, GPR_TIMESPAN);
    gpr_timespec grace_time = gpr_time_from_seconds(3, GPR_TIMESPAN);
    gpr_timespec finish_time =
        gpr_time_add(connect_deadline, continue_verifying_time);
    gpr_timespec restart_verifying_time =
        gpr_time_add(connect_deadline, grace_time);
    int is_after_deadline = gpr_time_cmp(now, connect_deadline) > 0;
    if (gpr_time_cmp(now, finish_time) > 0) {
      break;
    }
    gpr_log(GPR_DEBUG, "now=%lld.%09d connect_deadline=%lld.%09d",
            (long long)now.tv_sec, (int)now.tv_nsec,
            (long long)connect_deadline.tv_sec, (int)connect_deadline.tv_nsec);
    if (is_after_deadline && gpr_time_cmp(now, restart_verifying_time) <= 0) {
      /* allow some slack before insisting that things be done */
    } else {
      GPR_ASSERT(g_connections_complete ==
                 connections_complete_before + is_after_deadline);
    }
    gpr_timespec polling_deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
    if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
      grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
    }
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_exec_ctx_finish(&exec_ctx);

  close(svr_fd);
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    close(client_fd[i]);
  }
}
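The must_fail closure wired up via grpc_closure_init asserts that the connect attempt times out rather than succeeding. A sketch, assuming a finish_connection helper that bumps g_connections_complete under g_mu (the real helper would likely also kick the pollset so the polling loop notices):

static void finish_connection(void) {
  gpr_mu_lock(g_mu);
  g_connections_complete++;
  gpr_mu_unlock(g_mu);
}

static void must_fail(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  GPR_ASSERT(!success); /* the connect must fail; success means a bug */
  finish_connection();
}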
Example #28
0
static void simple_request_body(grpc_end2end_test_fixture f) {
  grpc_call *c;
  grpc_call *s;
  gpr_timespec deadline = five_seconds_time();
  cq_verifier *cqv = cq_verifier_create(f.cq);
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  char *details = NULL;
  size_t details_capacity = 0;
  int was_cancelled = 2;
  char *peer;

  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
                               "/foo", NULL, deadline, NULL);
  GPR_ASSERT(c);

  peer = grpc_call_get_peer(c);
  GPR_ASSERT(peer != NULL);
  gpr_log(GPR_DEBUG, "client_peer_before_call=%s", peer);
  gpr_free(peer);

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata = &initial_metadata_recv;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
  GPR_ASSERT(error == GRPC_CALL_OK);

  error =
      grpc_server_request_call(f.server, &s, &call_details,
                               &request_metadata_recv, f.cq, f.cq, tag(101));
  GPR_ASSERT(error == GRPC_CALL_OK);
  cq_expect_completion(cqv, tag(101), 1);
  cq_verify(cqv);

  peer = grpc_call_get_peer(s);
  GPR_ASSERT(peer != NULL);
  gpr_log(GPR_DEBUG, "server_peer=%s", peer);
  gpr_free(peer);
  peer = grpc_call_get_peer(c);
  GPR_ASSERT(peer != NULL);
  gpr_log(GPR_DEBUG, "client_peer=%s", peer);
  gpr_free(peer);

  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
  op->data.send_status_from_server.trailing_metadata_count = 0;
  op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
  op->data.send_status_from_server.status_details = "xyz";
  op->flags = 0;
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
  op->data.recv_close_on_server.cancelled = &was_cancelled;
  op->flags = 0;
  op->reserved = NULL;
  op++;
  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), NULL);
  GPR_ASSERT(error == GRPC_CALL_OK);

  cq_expect_completion(cqv, tag(102), 1);
  cq_expect_completion(cqv, tag(1), 1);
  cq_verify(cqv);

  GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
  GPR_ASSERT(0 == strcmp(details, "xyz"));
  GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
  GPR_ASSERT(0 == strncmp(call_details.host, "localhost", 9));
  GPR_ASSERT(was_cancelled == 1);

  gpr_free(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);

  grpc_call_destroy(c);
  grpc_call_destroy(s);

  cq_verifier_destroy(cqv);
}
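The small helpers this test uses are conventional in the end2end suite; plausible definitions (assumed, not copied from this file):

#include <stdint.h>

/* Turn a small integer into a completion-queue tag pointer. */
static void *tag(intptr_t t) { return (void *)t; }

/* A generous per-test deadline. */
static gpr_timespec five_seconds_time(void) {
  return GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
}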
Example #29
0
static int multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, np, nf, nd;
  pollset_hdr *h;
  grpc_kick_fd_info *kfd;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  if (h->pfd_capacity < h->fd_count + 1) {
    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
    gpr_free(h->pfds);
    gpr_free(h->watchers);
    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
  }
  nf = 0;
  np = 1;
  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
  if (kfd == NULL) {
    /* Already kicked */
    return 1;
  }
  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  h->pfds[0].events = POLLIN;
  h->pfds[0].revents = POLLOUT;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (nd = 0; nd < h->del_count; nd++) {
      if (h->fds[i] == h->dels[nd]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[nf++] = h->fds[i];
      h->watchers[np].fd = h->fds[i];
      h->pfds[np].fd = h->fds[i]->fd;
      h->pfds[np].revents = 0;
      np++;
    }
  }
  h->pfd_count = np;
  h->fd_count = nf;
  for (nd = 0; nd < h->del_count; nd++) {
    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
  }
  h->del_count = 0;
  if (h->pfd_count == 0) {
    end_polling(pollset);
    return 0;
  }
  pollset->counter++;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < np; i++) {
    h->pfds[i].events = grpc_fd_begin_poll(h->watchers[i].fd, pollset, POLLIN,
                                           POLLOUT, &h->watchers[i]);
  }

  r = poll(h->pfds, h->pfd_count, timeout);

  end_polling(pollset);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (h->pfds[0].revents & POLLIN) {
      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
    }
    for (i = 1; i < np; i++) {
      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
      }
      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
      }
    }
  }
  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

  gpr_mu_lock(&pollset->mu);
  pollset->counter--;

  return 1;
}
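The loop that drops orphaned and deleted fds while repacking survivors (the h->fds[nf++] = h->fds[i] path) is a classic in-place compaction. The same idiom on a plain array, for illustration only — not from the source:

#include <stddef.h>

/* Keep elements satisfying the predicate, packed at the front; returns the
   new logical length. Mirrors the fd-repacking loop above. */
static size_t compact(int *xs, size_t n, int (*keep)(int)) {
  size_t out = 0;
  for (size_t i = 0; i < n; i++) {
    if (keep(xs[i])) xs[out++] = xs[i];
  }
  return out;
}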
Example #30
0
unsigned gpr_cpu_num_cores(void) {
  /* TODO(jtattermusch): implement */
  gpr_log(GPR_ERROR, "Cannot determine number of CPUs: assuming 1");
  return 1;
}
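A plausible POSIX implementation of this stub, assuming sysconf(_SC_NPROCESSORS_ONLN) is available; it is named _posix here to mark it as a hypothetical sketch rather than the project's own function:

#include <unistd.h>

unsigned gpr_cpu_num_cores_posix(void) {
  long n = sysconf(_SC_NPROCESSORS_ONLN); /* online processors */
  return n > 0 ? (unsigned)n : 1;         /* fall back to 1 on error */
}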