Example #1
static void destroy(grpc_exec_ctx *exec_ctx, secure_endpoint *secure_ep) {
  secure_endpoint *ep = secure_ep;
  grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
  tsi_frame_protector_destroy(ep->protector);
  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes);
  grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer);
  grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer);
  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->output_buffer);
  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->source_buffer);
  gpr_mu_destroy(&ep->protector_mu);
  gpr_free(ep);
}
Example #2
static void me_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  passthru_endpoint *p = ((half *)ep)->parent;
  gpr_mu_lock(&p->mu);
  if (0 == --p->halves) {
    gpr_mu_unlock(&p->mu);
    gpr_mu_destroy(&p->mu);
    grpc_slice_buffer_destroy_internal(exec_ctx, &p->client.read_buffer);
    grpc_slice_buffer_destroy_internal(exec_ctx, &p->server.read_buffer);
    grpc_resource_user_unref(exec_ctx, p->client.resource_user);
    grpc_resource_user_unref(exec_ctx, p->server.resource_user);
    gpr_free(p);
  } else {
    gpr_mu_unlock(&p->mu);
  }
}
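The destructor above only compiles against a particular data layout; the sketch below reconstructs the relevant parts of half and passthru_endpoint purely from the fields me_destroy touches (names and any omitted members are assumptions, not the real definitions in passthru_endpoint.c).

/* Hypothetical layout inferred from me_destroy(); the real structs carry
   additional fields (read closures, shutdown flags, etc.). */
typedef struct passthru_endpoint passthru_endpoint;

typedef struct {
  grpc_endpoint base;                /* endpoint first, so the ((half *)ep) cast works */
  passthru_endpoint *parent;         /* back-pointer followed to reach the shared state */
  grpc_slice_buffer read_buffer;     /* destroyed with grpc_slice_buffer_destroy_internal */
  grpc_resource_user *resource_user; /* unref'd before the parent is freed */
} half;

struct passthru_endpoint {
  gpr_mu mu;  /* guards the shared state of both halves */
  int halves; /* starts at 2; the half that drops it to 0 frees everything */
  half client;
  half server;
};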
Example #3
static void test_one_slice(void) {
  gpr_log(GPR_INFO, "** test_one_slice **");

  grpc_resource_quota *q = grpc_resource_quota_create("test_one_slice");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);

  grpc_slice_buffer buffer;
  grpc_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  destroy_user(usr);
  grpc_resource_quota_unref(q);
}
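test_one_slice depends on two helpers that are not shown here. A plausible sketch, inferred only from how they are called above and from the unref pattern in Example #12 (treat the bodies as assumptions, not the exact test source):

/* Assumed helper: the slice allocator's completion callback, counting how many
   allocations finished. The signature is the standard grpc_closure callback
   used throughout these examples. */
static void inc_int_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
  ++*(int *)a;
}

/* Assumed helper: drops the test's reference to the resource user inside a
   temporary exec_ctx, mirroring Example #12. */
static void destroy_user(grpc_resource_user *usr) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resource_user_unref(&exec_ctx, usr);
  grpc_exec_ctx_finish(&exec_ctx);
}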
Example #4
static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) {
  grpc_endpoint_test_fixture f = config.create_fixture(slice_size);
  grpc_slice_buffer incoming;
  grpc_slice s =
      grpc_slice_from_copied_string("hello world 12345678900987654321");
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  int n = 0;
  grpc_closure done_closure;
  gpr_log(GPR_INFO, "Start test left over");

  grpc_slice_buffer_init(&incoming);
  grpc_closure_init(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(n == 1);
  GPR_ASSERT(incoming.count == 1);
  GPR_ASSERT(grpc_slice_eq(s, incoming.slices[0]));

  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE("test_leftover end"));
  grpc_endpoint_shutdown(&exec_ctx, f.server_ep,
                         GRPC_ERROR_CREATE("test_leftover end"));
  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_slice_unref_internal(&exec_ctx, s);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &incoming);

  clean_up();
}
Example #5
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #6
// Unref and clean up handshaker.
static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx,
                                          http_connect_handshaker* handshaker) {
  if (gpr_unref(&handshaker->refcount)) {
    gpr_mu_destroy(&handshaker->mu);
    if (handshaker->endpoint_to_destroy != NULL) {
      grpc_endpoint_destroy(exec_ctx, handshaker->endpoint_to_destroy);
    }
    if (handshaker->read_buffer_to_destroy != NULL) {
      grpc_slice_buffer_destroy_internal(exec_ctx,
                                         handshaker->read_buffer_to_destroy);
      gpr_free(handshaker->read_buffer_to_destroy);
    }
    grpc_slice_buffer_destroy_internal(exec_ctx, &handshaker->write_buffer);
    grpc_http_parser_destroy(&handshaker->http_parser);
    grpc_http_response_destroy(&handshaker->http_response);
    gpr_free(handshaker);
  }
}
Example #7
static void security_handshaker_unref(grpc_exec_ctx *exec_ctx,
                                      security_handshaker *h) {
  if (gpr_unref(&h->refs)) {
    gpr_mu_destroy(&h->mu);
    tsi_handshaker_destroy(h->handshaker);
    tsi_handshaker_result_destroy(h->handshaker_result);
    if (h->endpoint_to_destroy != NULL) {
      grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy);
    }
    if (h->read_buffer_to_destroy != NULL) {
      grpc_slice_buffer_destroy_internal(exec_ctx, h->read_buffer_to_destroy);
      gpr_free(h->read_buffer_to_destroy);
    }
    gpr_free(h->handshake_buffer);
    grpc_slice_buffer_destroy_internal(exec_ctx, &h->outgoing);
    GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
    GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, h->connector, "handshake");
    gpr_free(h);
  }
}
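Both unref functions above follow the same gpr refcounting idiom: gpr_unref() returns non-zero only for the caller that drops the last reference, so exactly one code path runs the teardown. A minimal sketch of the surrounding discipline (the handshaker_ref helper name is illustrative, not the library's):

/* Refcount discipline assumed by the two functions above. */
static void handshaker_ref(security_handshaker *h) {
  gpr_ref(&h->refs); /* taken before every async operation that will call back */
}

/* At creation time:   gpr_ref_init(&h->refs, 1);
   In every callback:  security_handshaker_unref(exec_ctx, h);
   The single call whose gpr_unref() returns non-zero performs the frees shown above. */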
Example #8
void grpc_byte_buffer_destroy(grpc_byte_buffer *bb) {
  if (!bb) return;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  switch (bb->type) {
    case GRPC_BB_RAW:
      grpc_slice_buffer_destroy_internal(&exec_ctx, &bb->data.raw.slice_buffer);
      break;
  }
  gpr_free(bb);
  grpc_exec_ctx_finish(&exec_ctx);
}
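For context, a minimal caller-side sketch of the public API this destructor sits behind. grpc_raw_byte_buffer_create, grpc_slice_from_copied_string and grpc_slice_unref are the public functions of this era; the wrapper function itself is illustrative.

/* Minimal usage sketch: build a GRPC_BB_RAW buffer, then destroy it. */
static void byte_buffer_roundtrip(void) {
  grpc_slice slice = grpc_slice_from_copied_string("payload");
  grpc_byte_buffer *bb = grpc_raw_byte_buffer_create(&slice, 1);
  grpc_slice_unref(slice); /* the byte buffer holds its own reference */
  /* ... pass bb to a call op, or read it back with a byte buffer reader ... */
  grpc_byte_buffer_destroy(bb); /* runs the function shown above */
}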
Example #9
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("large_read_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
                       slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #10
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
  grpc_handshaker_args *args = arg;
  server_connection_state *connection_state = args->user_data;
  gpr_mu_lock(&connection_state->server_state->mu);
  if (error != GRPC_ERROR_NONE || connection_state->server_state->shutdown) {
    const char *error_str = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str);

    if (error == GRPC_ERROR_NONE && args->endpoint != NULL) {
      // We were shut down after handshaking completed successfully, so
      // destroy the endpoint here.
      // TODO(ctiller): It is currently necessary to shutdown endpoints
      // before destroying them, even if we know that there are no
      // pending read/write callbacks.  This should be fixed, at which
      // point this can be removed.
      grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_NONE);
      grpc_endpoint_destroy(exec_ctx, args->endpoint);
      grpc_channel_args_destroy(exec_ctx, args->args);
      grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer);
      gpr_free(args->read_buffer);
    }
  } else {
    // If the handshaking succeeded but there is no endpoint, then the
    // handshaker may have handed off the connection to some external
    // code, so we can just clean up here without creating a transport.
    if (args->endpoint != NULL) {
      grpc_transport *transport =
          grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 0);
      grpc_server_setup_transport(
          exec_ctx, connection_state->server_state->server, transport,
          connection_state->accepting_pollset, args->args);
      grpc_chttp2_transport_start_reading(exec_ctx, transport,
                                          args->read_buffer);
      grpc_channel_args_destroy(exec_ctx, args->args);
    }
  }
  grpc_handshake_manager_pending_list_remove(
      &connection_state->server_state->pending_handshake_mgrs,
      connection_state->handshake_mgr);
  gpr_mu_unlock(&connection_state->server_state->mu);
  grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
  grpc_tcp_server_unref(exec_ctx, connection_state->server_state->tcp_server);
  gpr_free(connection_state->acceptor);
  gpr_free(connection_state);
}
Example #11
static void finish_send_message(grpc_exec_ctx *exec_ctx,
                                grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  int did_compress;
  grpc_slice_buffer tmp;
  grpc_slice_buffer_init(&tmp);
  did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
                                   &calld->slices, &tmp);
  if (did_compress) {
    if (GRPC_TRACER_ON(grpc_compression_trace)) {
      char *algo_name;
      const size_t before_size = calld->slices.length;
      const size_t after_size = tmp.length;
      const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
      GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
                                                 &algo_name));
      gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
                         " bytes (%.2f%% savings)",
              algo_name, before_size, after_size, 100 * savings_ratio);
    }
    grpc_slice_buffer_swap(&calld->slices, &tmp);
    calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
  } else {
    if (GRPC_TRACER_ON(grpc_compression_trace)) {
      char *algo_name;
      GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
                                                 &algo_name));
      gpr_log(GPR_DEBUG,
              "Algorithm '%s' enabled but decided not to compress. Input size: "
              "%" PRIuPTR,
              algo_name, calld->slices.length);
    }
  }

  grpc_slice_buffer_destroy_internal(exec_ctx, &tmp);

  grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
                                calld->send_flags);
  calld->send_op->payload->send_message.send_message =
      &calld->replacement_stream.base;
  calld->post_send = calld->send_op->on_complete;
  calld->send_op->on_complete = &calld->send_done;

  grpc_call_next_op(exec_ctx, elem, calld->send_op);
}
Example #12
static void test_negative_rq_free_pool(void) {
  gpr_log(GPR_INFO, "** test_negative_rq_free_pool **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_negative_rq_free_pool");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);

  grpc_slice_buffer buffer;
  grpc_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  grpc_resource_quota_resize(q, 512);

  double eps = 0.0001;
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) < 1 + eps);
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) > 1 - eps);

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_unref(&exec_ctx, usr);
    grpc_exec_ctx_finish(&exec_ctx);
  }

  grpc_resource_quota_unref(q);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #13
static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
                                      grpc_resolver_args *args,
                                      bool parse(const grpc_uri *uri,
                                                 grpc_resolved_address *dst)) {
  if (0 != strcmp(args->uri->authority, "")) {
    gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
            args->uri->scheme);
    return NULL;
  }
  /* Construct addresses. */
  grpc_slice path_slice =
      grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
  grpc_slice_buffer path_parts;
  grpc_slice_buffer_init(&path_parts);
  grpc_slice_split(path_slice, ",", &path_parts);
  grpc_lb_addresses *addresses =
      grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
  bool errors_found = false;
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    grpc_uri ith_uri = *args->uri;
    char *part_str = grpc_slice_to_c_string(path_parts.slices[i]);
    ith_uri.path = part_str;
    if (!parse(&ith_uri, &addresses->addresses[i].address)) {
      errors_found = true; /* GPR_TRUE */
    }
    gpr_free(part_str);
    if (errors_found) break;
  }
  grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
  grpc_slice_unref_internal(exec_ctx, path_slice);
  if (errors_found) {
    grpc_lb_addresses_destroy(exec_ctx, addresses);
    return NULL;
  }
  /* Instantiate resolver. */
  sockaddr_resolver *r = gpr_zalloc(sizeof(sockaddr_resolver));
  r->addresses = addresses;
  r->channel_args = grpc_channel_args_copy(args->args);
  grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
  return &r->base;
}
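The do_nothing destroy callback handed to grpc_slice_new above is not shown. Because the slice merely wraps args->uri->path, which the URI still owns, the callback must not free anything; a likely one-liner, consistent with that usage (assumed, not copied from the source):

/* Assumed no-op destroy function: the slice borrows memory owned by args->uri,
   so nothing is released when the slice is unref'd. */
static void do_nothing(void *ignored) {}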
Example #14
static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
  grpc_handshaker_args *args = arg;
  chttp2_connector *c = args->user_data;
  gpr_mu_lock(&c->mu);
  if (error != GRPC_ERROR_NONE || c->shutdown) {
    if (error == GRPC_ERROR_NONE) {
      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
      // We were shut down after handshaking completed successfully, so
      // destroy the endpoint here.
      // TODO(ctiller): It is currently necessary to shutdown endpoints
      // before destroying them, even if we know that there are no
      // pending read/write callbacks.  This should be fixed, at which
      // point this can be removed.
      grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_REF(error));
      grpc_endpoint_destroy(exec_ctx, args->endpoint);
      grpc_channel_args_destroy(exec_ctx, args->args);
      grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer);
      gpr_free(args->read_buffer);
    } else {
      error = GRPC_ERROR_REF(error);
    }
    memset(c->result, 0, sizeof(*c->result));
  } else {
    c->result->transport =
        grpc_create_chttp2_transport(exec_ctx, args->args, args->endpoint, 1);
    GPR_ASSERT(c->result->transport);
    grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport,
                                        args->read_buffer);
    c->result->channel_args = args->args;
  }
  grpc_closure *notify = c->notify;
  c->notify = NULL;
  grpc_closure_sched(exec_ctx, notify, error);
  grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
  c->handshake_mgr = NULL;
  gpr_mu_unlock(&c->mu);
  chttp2_connector_unref(exec_ctx, (grpc_connector *)c);
}
Example #15
static void multiple_shutdown_test(grpc_endpoint_test_config config) {
  grpc_endpoint_test_fixture f =
      begin_test(config, "multiple_shutdown_test", 128);
  int fail_count = 0;

  grpc_slice_buffer slice_buffer;
  grpc_slice_buffer_init(&slice_buffer);

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                         grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 0);
  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  wait_for_fail_count(&exec_ctx, &fail_count, 1);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                         grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 2);
  grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
  grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
                      GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 3);
  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  wait_for_fail_count(&exec_ctx, &fail_count, 3);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &slice_buffer);

  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
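wait_for_fail_count is not shown here. A plausible sketch, modelled on the pollset loops in Examples #5 and #9; the real helper's exact deadline and iteration policy are assumptions:

/* Assumed helper: pump the pollset until the expected number of failing
   closures has run, then check the count. */
static void wait_for_fail_count(grpc_exec_ctx *exec_ctx, int *fail_count,
                                int want_fail_count) {
  grpc_exec_ctx_flush(exec_ctx);
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
  gpr_mu_lock(g_mu);
  while (*fail_count < want_fail_count) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);
  GPR_ASSERT(*fail_count == want_fail_count);
}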
Example #16
static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx,
                                           grpc_resolver_factory* factory,
                                           grpc_resolver_args* args) {
  if (0 != strcmp(args->uri->authority, "")) {
    gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
            args->uri->scheme);
    return NULL;
  }
  // Get lb_enabled arg.  Anything other than "0" is interpreted as true.
  const char* lb_enabled_qpart =
      grpc_uri_get_query_arg(args->uri, "lb_enabled");
  const bool lb_enabled =
      lb_enabled_qpart != NULL && strcmp("0", lb_enabled_qpart) != 0;

  // Get the balancer's names.
  const char* balancer_names =
      grpc_uri_get_query_arg(args->uri, "balancer_names");
  grpc_slice_buffer balancer_names_parts;
  grpc_slice_buffer_init(&balancer_names_parts);
  if (balancer_names != NULL) {
    const grpc_slice balancer_names_slice =
        grpc_slice_from_copied_string(balancer_names);
    grpc_slice_split(balancer_names_slice, ",", &balancer_names_parts);
    grpc_slice_unref(balancer_names_slice);
  }

  // Construct addresses.
  grpc_slice path_slice =
      grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
  grpc_slice_buffer path_parts;
  grpc_slice_buffer_init(&path_parts);
  grpc_slice_split(path_slice, ",", &path_parts);
  if (balancer_names_parts.count > 0 &&
      path_parts.count != balancer_names_parts.count) {
    gpr_log(GPR_ERROR,
            "Balancer names present but mismatched with number of addresses: "
            "%lu balancer names != %lu addresses",
            (unsigned long)balancer_names_parts.count,
            (unsigned long)path_parts.count);
    return NULL;
  }
  grpc_lb_addresses* addresses =
      grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
  bool errors_found = false;
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    grpc_uri ith_uri = *args->uri;
    char* part_str = grpc_slice_to_c_string(path_parts.slices[i]);
    ith_uri.path = part_str;
    if (!parse_ipv4(&ith_uri, &addresses->addresses[i].address)) {
      errors_found = true;
    }
    gpr_free(part_str);
    if (errors_found) break;
    addresses->addresses[i].is_balancer = lb_enabled;
    addresses->addresses[i].balancer_name =
        balancer_names_parts.count > 0
            ? grpc_dump_slice(balancer_names_parts.slices[i], GPR_DUMP_ASCII)
            : NULL;
  }
  grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
  grpc_slice_buffer_destroy_internal(exec_ctx, &balancer_names_parts);
  grpc_slice_unref(path_slice);
  if (errors_found) {
    grpc_lb_addresses_destroy(exec_ctx, addresses);
    return NULL;
  }
  // Instantiate resolver.
  fake_resolver* r = gpr_malloc(sizeof(fake_resolver));
  memset(r, 0, sizeof(*r));
  r->channel_args = grpc_channel_args_copy(args->args);
  r->addresses = addresses;
  gpr_mu_init(&r->mu);
  grpc_resolver_init(&r->base, &fake_resolver_vtable);
  return &r->base;
}
Example #17
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                         grpc_error* error) {
  http_connect_handshaker* handshaker = arg;
  gpr_mu_lock(&handshaker->mu);
  if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
    // If the read failed or we're shutting down, clean up and invoke the
    // callback with the error.
    handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
    goto done;
  }
  // Add buffer to parser.
  for (size_t i = 0; i < handshaker->args->read_buffer->count; ++i) {
    if (GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i]) > 0) {
      size_t body_start_offset = 0;
      error = grpc_http_parser_parse(&handshaker->http_parser,
                                     handshaker->args->read_buffer->slices[i],
                                     &body_start_offset);
      if (error != GRPC_ERROR_NONE) {
        handshake_failed_locked(exec_ctx, handshaker, error);
        goto done;
      }
      if (handshaker->http_parser.state == GRPC_HTTP_BODY) {
        // Remove the data we've already read from the read buffer,
        // leaving only the leftover bytes (if any).
        grpc_slice_buffer tmp_buffer;
        grpc_slice_buffer_init(&tmp_buffer);
        if (body_start_offset <
            GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i])) {
          grpc_slice_buffer_add(
              &tmp_buffer,
              grpc_slice_split_tail(&handshaker->args->read_buffer->slices[i],
                                    body_start_offset));
        }
        grpc_slice_buffer_addn(&tmp_buffer,
                               &handshaker->args->read_buffer->slices[i + 1],
                               handshaker->args->read_buffer->count - i - 1);
        grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer);
        grpc_slice_buffer_destroy_internal(exec_ctx, &tmp_buffer);
        break;
      }
    }
  }
  // If we're not done reading the response, read more data.
  // TODO(roth): In practice, I suspect that the response to a CONNECT
  // request will never include a body, in which case this check is
  // sufficient.  However, the language of RFC-2817 doesn't explicitly
  // forbid the response from including a body.  If there is a body,
  // it's possible that we might have parsed part but not all of the
  // body, in which case this check will cause us to fail to parse the
  // remainder of the body.  If that ever becomes an issue, we may
  // need to fix the HTTP parser to understand when the body is
  // complete (e.g., handling chunked transfer encoding or looking
  // at the Content-Length: header).
  if (handshaker->http_parser.state != GRPC_HTTP_BODY) {
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
                                               handshaker->args->read_buffer);
    grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
                       handshaker->args->read_buffer,
                       &handshaker->response_read_closure);
    gpr_mu_unlock(&handshaker->mu);
    return;
  }
  // Make sure we got a 2xx response.
  if (handshaker->http_response.status < 200 ||
      handshaker->http_response.status >= 300) {
    char* msg;
    gpr_asprintf(&msg, "HTTP proxy returned response code %d",
                 handshaker->http_response.status);
    error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
    gpr_free(msg);
    handshake_failed_locked(exec_ctx, handshaker, error);
    goto done;
  }
  // Success.  Invoke handshake-done callback.
  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
done:
  // Set shutdown to true so that subsequent calls to
  // http_connect_handshaker_shutdown() do nothing.
  handshaker->shutdown = true;
  gpr_mu_unlock(&handshaker->mu);
  http_connect_handshaker_unref(exec_ctx, handshaker);
}
Example #18
/* Do both reading and writing using the grpc_endpoint API.

   This also includes a test of the shutdown behavior.
 */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, bool shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_log(GPR_DEBUG, "num_bytes=%" PRIuPTR " write_size=%" PRIuPTR
                     " slice_size=%" PRIuPTR " shutdown=%d",
          num_bytes, write_size, slice_size, shutdown);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %" PRIuPTR
                      " bytes, slice size %" PRIuPTR,
            num_bytes, slice_size);
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;
  GRPC_CLOSURE_INIT(&state.done_read, read_and_write_test_read_handler, &state,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&state.done_write, read_and_write_test_write_handler,
                    &state, grpc_schedule_on_exec_ctx);
  grpc_slice_buffer_init(&state.outgoing);
  grpc_slice_buffer_init(&state.incoming);

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&exec_ctx, &state, GRPC_ERROR_NONE);
  grpc_exec_ctx_flush(&exec_ctx);

  grpc_endpoint_read(&exec_ctx, state.read_ep, &state.incoming,
                     &state.done_read);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(
        &exec_ctx, state.read_ep,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(
        &exec_ctx, state.write_ep,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  }
  grpc_exec_ctx_flush(&exec_ctx);

  gpr_mu_lock(g_mu);
  while (!state.read_done || !state.write_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
  }
  gpr_mu_unlock(g_mu);
  grpc_exec_ctx_flush(&exec_ctx);

  end_test(config);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.outgoing);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, state.read_ep);
  grpc_endpoint_destroy(&exec_ctx, state.write_ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #19
/* Do a read_test, then release fd and try to read/write again. Verify that
   grpc_tcp_fd() is available before the fd is released. */
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure fd_released_cb;
  int fd_released_done = 0;
  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
                    grpc_schedule_on_exec_ctx);

  gpr_log(GPR_INFO,
          "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("release_fd_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
                       slice_size, "test");
  GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
            state.read_bytes, state.target_read_bytes);
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_mu_lock(g_mu);
  while (!fd_released_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
  }
  gpr_mu_unlock(g_mu);
  GPR_ASSERT(fd_released_done == 1);
  GPR_ASSERT(fd == sv[1]);
  grpc_exec_ctx_finish(&exec_ctx);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
}
Example #20
static void sb_list_entry_destroy(grpc_exec_ctx *exec_ctx, sb_list_entry *le) {
  grpc_slice_buffer_destroy_internal(exec_ctx, &le->sb);
  gpr_free(le);
}