Example #1
0
grpc_endpoint *grpc_secure_endpoint_create(
    struct tsi_frame_protector *protector,
    struct tsi_zero_copy_grpc_protector *zero_copy_protector,
    grpc_endpoint *transport, grpc_slice *leftover_slices,
    size_t leftover_nslices) {
  size_t i;
  secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
  ep->base.vtable = &vtable;
  ep->wrapped_ep = transport;
  ep->protector = protector;
  ep->zero_copy_protector = zero_copy_protector;
  grpc_slice_buffer_init(&ep->leftover_bytes);
  for (i = 0; i < leftover_nslices; i++) {
    grpc_slice_buffer_add(&ep->leftover_bytes,
                          grpc_slice_ref_internal(leftover_slices[i]));
  }
  ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
  ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
  grpc_slice_buffer_init(&ep->output_buffer);
  grpc_slice_buffer_init(&ep->source_buffer);
  ep->read_buffer = NULL;
  GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
  gpr_mu_init(&ep->protector_mu);
  gpr_ref_init(&ep->ref, 1);
  return &ep->base;
}
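A minimal caller-side sketch, with hypothetical names (`wrap_after_handshake`, `tcp_ep`): once a TSI handshake has produced a frame protector, the wrapped endpoint is created like this; passing NULL for the zero-copy protector assumes the plain copying path is acceptable.
/* Hypothetical helper: wrap an already-connected endpoint after the TSI
   handshake.  The protector and any unconsumed handshake bytes are assumed
   to come from earlier handshake code. */
grpc_endpoint *wrap_after_handshake(grpc_endpoint *tcp_ep,
                                    struct tsi_frame_protector *protector,
                                    grpc_slice *leftover,
                                    size_t leftover_count) {
  return grpc_secure_endpoint_create(protector, NULL /* zero_copy_protector */,
                                     tcp_ep, leftover, leftover_count);
}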
Example #2
0
static void on_connect(grpc_exec_ctx *exec_ctx, void *arg, grpc_endpoint *tcp,
                       grpc_pollset *accepting_pollset,
                       grpc_tcp_server_acceptor *acceptor) {
  test_tcp_server *server = arg;
  grpc_closure_init(&on_read, handle_read, NULL);
  grpc_slice_buffer_init(&state.incoming_buffer);
  grpc_slice_buffer_init(&state.temp_incoming_buffer);
  state.tcp = tcp;
  grpc_endpoint_add_to_pollset(exec_ctx, tcp, server->pollset);
  grpc_endpoint_read(exec_ctx, tcp, &state.temp_incoming_buffer, &on_read);
}
Example #3
0
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  connector *c = arg;
  grpc_endpoint *tcp = c->newly_connecting_endpoint;
  if (tcp != NULL) {
    gpr_mu_lock(&c->mu);
    GPR_ASSERT(c->connecting_endpoint == NULL);
    c->connecting_endpoint = tcp;
    gpr_mu_unlock(&c->mu);
    if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
      grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
                        c);
      grpc_slice_buffer_init(&c->initial_string_buffer);
      grpc_slice_buffer_add(&c->initial_string_buffer,
                            c->args.initial_connect_string);
      grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
                          &c->initial_string_sent);
    } else {
      grpc_handshake_manager_do_handshake(
          exec_ctx, c->handshake_mgr, tcp, c->args.channel_args,
          c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
    }
  } else {
    memset(c->result, 0, sizeof(*c->result));
    grpc_closure *notify = c->notify;
    c->notify = NULL;
    grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
  }
}
Example #4
0
static void verify_table_size_change_match_elem_size(const char *key,
                                                     const char *value) {
  grpc_slice_buffer output;
  grpc_mdelem *elem = grpc_mdelem_from_strings(key, value);
  size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem);
  size_t initial_table_size = g_compressor.table_size;
  grpc_linked_mdelem *e = gpr_malloc(sizeof(*e));
  grpc_metadata_batch b;
  grpc_metadata_batch_init(&b);
  e[0].md = elem;
  e[0].prev = NULL;
  e[0].next = NULL;
  b.list.head = &e[0];
  b.list.tail = &e[0];
  grpc_slice_buffer_init(&output);

  grpc_transport_one_way_stats stats;
  memset(&stats, 0, sizeof(stats));
  grpc_chttp2_encode_header(&g_compressor, 0xdeadbeef, &b, 0, 16384, &stats,
                            &output);
  grpc_slice_buffer_destroy(&output);
  grpc_metadata_batch_destroy(&b);

  GPR_ASSERT(g_compressor.table_size == elem_size + initial_table_size);
  gpr_free(e);
}
Example #5
0
grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd,
                               grpc_resource_quota *resource_quota,
                               size_t slice_size, const char *peer_string) {
  grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
  tcp->base.vtable = &vtable;
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->fd = grpc_fd_wrapped_fd(em_fd);
  tcp->read_cb = NULL;
  tcp->write_cb = NULL;
  tcp->release_fd_cb = NULL;
  tcp->release_fd = NULL;
  tcp->incoming_buffer = NULL;
  tcp->slice_size = slice_size;
  tcp->iov_size = 1;
  tcp->finished_edge = true;
  /* paired with unref in grpc_tcp_destroy */
  gpr_ref_init(&tcp->refcount, 1);
  gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
  tcp->em_fd = em_fd;
  tcp->read_closure.cb = tcp_handle_read;
  tcp->read_closure.cb_arg = tcp;
  tcp->write_closure.cb = tcp_handle_write;
  tcp->write_closure.cb_arg = tcp;
  grpc_slice_buffer_init(&tcp->last_read_buffer);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  grpc_resource_user_slice_allocator_init(
      &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
  /* Tell network status tracker about new endpoint */
  grpc_network_status_register_endpoint(&tcp->base);

  return &tcp->base;
}
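A hedged creation sketch mirroring the TCP tests later in this listing; `fd` stands for an already-connected socket and the names are placeholders.
/* Sketch only: build a TCP endpoint around an existing connected socket. */
grpc_resource_quota *quota = grpc_resource_quota_create("example_quota");
grpc_endpoint *ep = grpc_tcp_create(grpc_fd_create(fd, "example_fd"), quota,
                                    8192 /* slice_size */, "ipv4:127.0.0.1:0");
/* The endpoint's resource user keeps the quota alive, so the creating code
   can drop its own reference (the tests below use the _internal variant). */
grpc_resource_quota_unref(quota);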
Example #6
0
static void test_one_slice(void) {
  gpr_log(GPR_INFO, "** test_one_slice **");

  grpc_resource_quota *q = grpc_resource_quota_create("test_one_slice");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);

  grpc_slice_buffer buffer;
  grpc_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  destroy_user(usr);
  grpc_resource_quota_unref(q);
}
Example #7
0
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader *reader,
                                 grpc_byte_buffer *buffer) {
  grpc_slice_buffer decompressed_slices_buffer;
  reader->buffer_in = buffer;
  switch (reader->buffer_in->type) {
    case GRPC_BB_RAW:
      grpc_slice_buffer_init(&decompressed_slices_buffer);
      if (is_compressed(reader->buffer_in)) {
        if (grpc_msg_decompress(reader->buffer_in->data.raw.compression,
                                &reader->buffer_in->data.raw.slice_buffer,
                                &decompressed_slices_buffer) == 0) {
          gpr_log(GPR_ERROR,
                  "Unexpected error decompressing data for algorithm with enum "
                  "value '%d'.",
                  reader->buffer_in->data.raw.compression);
          /* destroy the slice buffer before bailing out so that any
             partially decompressed slices are not leaked */
          grpc_slice_buffer_destroy(&decompressed_slices_buffer);
          memset(reader, 0, sizeof(*reader));
          return 0;
        } else { /* all fine */
          reader->buffer_out =
              grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
                                          decompressed_slices_buffer.count);
        }
        grpc_slice_buffer_destroy(&decompressed_slices_buffer);
      } else { /* not compressed, use the input buffer as output */
        reader->buffer_out = reader->buffer_in;
      }
      reader->current.index = 0;
      break;
  }
  return 1;
}
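A small consumer sketch (hypothetical helper name), assuming `grpc_byte_buffer_reader_destroy` from the public byte-buffer API: it measures a buffer's payload by walking the slices the reader hands out.
/* Sketch: count the payload bytes of a byte buffer via its reader. */
static size_t byte_buffer_length(grpc_byte_buffer *bb) {
  grpc_byte_buffer_reader reader;
  grpc_slice slice;
  size_t total = 0;
  if (!grpc_byte_buffer_reader_init(&reader, bb)) return 0;
  while (grpc_byte_buffer_reader_next(&reader, &slice)) {
    total += GRPC_SLICE_LENGTH(slice);
    grpc_slice_unref(slice); /* reader_next returns an owned reference */
  }
  grpc_byte_buffer_reader_destroy(&reader);
  return total;
}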
Example #8
0
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  chttp2_connector *c = arg;
  gpr_mu_lock(&c->mu);
  GPR_ASSERT(c->connecting);
  c->connecting = false;
  if (error != GRPC_ERROR_NONE || c->shutdown) {
    if (error == GRPC_ERROR_NONE) {
      error = GRPC_ERROR_CREATE("connector shutdown");
    } else {
      error = GRPC_ERROR_REF(error);
    }
    memset(c->result, 0, sizeof(*c->result));
    grpc_closure *notify = c->notify;
    c->notify = NULL;
    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
    if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
    gpr_mu_unlock(&c->mu);
    chttp2_connector_unref(exec_ctx, arg);
  } else {
    GPR_ASSERT(c->endpoint != NULL);
    if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
      grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
                        c);
      grpc_slice_buffer_init(&c->initial_string_buffer);
      grpc_slice_buffer_add(&c->initial_string_buffer,
                            c->args.initial_connect_string);
      grpc_endpoint_write(exec_ctx, c->endpoint, &c->initial_string_buffer,
                          &c->initial_string_sent);
    } else {
      start_handshake_locked(exec_ctx, c);
    }
    gpr_mu_unlock(&c->mu);
  }
}
Example #9
0
static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) {
  grpc_endpoint_test_fixture f = config.create_fixture(slice_size);
  grpc_slice_buffer incoming;
  grpc_slice s =
      grpc_slice_from_copied_string("hello world 12345678900987654321");
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  int n = 0;
  grpc_closure done_closure;
  gpr_log(GPR_INFO, "Start test left over");

  grpc_slice_buffer_init(&incoming);
  grpc_closure_init(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure);
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(n == 1);
  GPR_ASSERT(incoming.count == 1);
  GPR_ASSERT(grpc_slice_eq(s, incoming.slices[0]));

  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE("test_leftover end"));
  grpc_endpoint_shutdown(&exec_ctx, f.server_ep,
                         GRPC_ERROR_CREATE("test_leftover end"));
  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_slice_unref_internal(&exec_ctx, s);
  grpc_slice_buffer_destroy_internal(&exec_ctx, &incoming);

  clean_up();
}
Example #10
0
/* verify that the output generated by encoding the stream matches the
   hexstring passed in */
static void verify(size_t window_available, int eof, size_t expect_window_used,
                   const char *expected, size_t nheaders, ...) {
  grpc_slice_buffer output;
  grpc_slice merged;
  grpc_slice expect = parse_hexstring(expected);
  size_t i;
  va_list l;
  grpc_linked_mdelem *e = gpr_malloc(sizeof(*e) * nheaders);
  grpc_metadata_batch b;

  grpc_metadata_batch_init(&b);

  va_start(l, nheaders);
  for (i = 0; i < nheaders; i++) {
    char *key = va_arg(l, char *);
    char *value = va_arg(l, char *);
    if (i) {
      e[i - 1].next = &e[i];
      e[i].prev = &e[i - 1];
    }
    e[i].md = grpc_mdelem_from_strings(key, value);
  }
  e[0].prev = NULL;
  e[nheaders - 1].next = NULL;
  va_end(l);

  b.list.head = &e[0];
  b.list.tail = &e[nheaders - 1];

  if (cap_to_delete == num_to_delete) {
    cap_to_delete = GPR_MAX(2 * cap_to_delete, 1000);
    to_delete = gpr_realloc(to_delete, sizeof(*to_delete) * cap_to_delete);
  }
  to_delete[num_to_delete++] = e;

  grpc_slice_buffer_init(&output);

  grpc_transport_one_way_stats stats;
  memset(&stats, 0, sizeof(stats));
  grpc_chttp2_encode_header(&g_compressor, 0xdeadbeef, &b, eof, 16384, &stats,
                            &output);
  merged = grpc_slice_merge(output.slices, output.count);
  grpc_slice_buffer_destroy(&output);
  grpc_metadata_batch_destroy(&b);

  if (0 != grpc_slice_cmp(merged, expect)) {
    char *expect_str = grpc_dump_slice(expect, GPR_DUMP_HEX | GPR_DUMP_ASCII);
    char *got_str = grpc_dump_slice(merged, GPR_DUMP_HEX | GPR_DUMP_ASCII);
    gpr_log(GPR_ERROR, "mismatched output for %s", expected);
    gpr_log(GPR_ERROR, "EXPECT: %s", expect_str);
    gpr_log(GPR_ERROR, "GOT:    %s", got_str);
    gpr_free(expect_str);
    gpr_free(got_str);
    g_failure = 1;
  }

  grpc_slice_unref(merged);
  grpc_slice_unref(expect);
}
Example #11
0
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #12
0
static void parse_query_parts(grpc_uri *uri) {
  static const char *QUERY_PARTS_SEPARATOR = "&";
  static const char *QUERY_PARTS_VALUE_SEPARATOR = "=";
  GPR_ASSERT(uri->query != NULL);
  if (uri->query[0] == '\0') {
    uri->query_parts = NULL;
    uri->query_parts_values = NULL;
    uri->num_query_parts = 0;
    return;
  }
  grpc_slice query_slice =
      grpc_slice_new(uri->query, strlen(uri->query), do_nothing);
  grpc_slice_buffer query_parts; /* the &-separated elements of the query */
  grpc_slice_buffer query_param_parts; /* the =-separated subelements */

  grpc_slice_buffer_init(&query_parts);
  grpc_slice_buffer_init(&query_param_parts);

  grpc_slice_split(query_slice, QUERY_PARTS_SEPARATOR, &query_parts);
  uri->query_parts = gpr_malloc(query_parts.count * sizeof(char *));
  uri->query_parts_values = gpr_malloc(query_parts.count * sizeof(char *));
  uri->num_query_parts = query_parts.count;
  for (size_t i = 0; i < query_parts.count; i++) {
    grpc_slice_split(query_parts.slices[i], QUERY_PARTS_VALUE_SEPARATOR,
                     &query_param_parts);
    GPR_ASSERT(query_param_parts.count > 0);
    uri->query_parts[i] =
        grpc_dump_slice(query_param_parts.slices[0], GPR_DUMP_ASCII);
    if (query_param_parts.count > 1) {
      /* TODO(dgq): only the first value after the separator is considered.
       * Perhaps all chars after the first separator for the query part should
       * be included, even if they include the separator. */
      uri->query_parts_values[i] =
          grpc_dump_slice(query_param_parts.slices[1], GPR_DUMP_ASCII);
    } else {
      uri->query_parts_values[i] = NULL;
    }
    grpc_slice_buffer_reset_and_unref(&query_param_parts);
  }
  grpc_slice_buffer_destroy(&query_parts);
  grpc_slice_buffer_destroy(&query_param_parts);
  grpc_slice_unref(query_slice);
}
Example #13
0
static grpc_handshaker *security_handshaker_create(
    grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
    grpc_security_connector *connector) {
  security_handshaker *h = gpr_malloc(sizeof(security_handshaker));
  memset(h, 0, sizeof(security_handshaker));
  grpc_handshaker_init(&security_handshaker_vtable, &h->base);
  h->handshaker = handshaker;
  h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
  gpr_mu_init(&h->mu);
  gpr_ref_init(&h->refs, 1);
  h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
  h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
  grpc_closure_init(&h->on_handshake_data_sent_to_peer,
                    on_handshake_data_sent_to_peer, h);
  grpc_closure_init(&h->on_handshake_data_received_from_peer,
                    on_handshake_data_received_from_peer, h);
  grpc_closure_init(&h->on_peer_checked, on_peer_checked, h);
  grpc_slice_buffer_init(&h->left_overs);
  grpc_slice_buffer_init(&h->outgoing);
  return &h->base;
}
Example #14
0
static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
                      grpc_endpoint* endpoint, grpc_pollset* accepting_pollset,
                      grpc_tcp_server_acceptor* acceptor) {
  grpc_end2end_http_proxy* proxy = arg;
  // Instantiate proxy_connection.
  proxy_connection* conn = gpr_malloc(sizeof(*conn));
  memset(conn, 0, sizeof(*conn));
  conn->client_endpoint = endpoint;
  gpr_ref_init(&conn->refcount, 1);
  conn->pollset_set = grpc_pollset_set_create();
  grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
  grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn);
  grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done,
                    conn);
  grpc_closure_init(&conn->on_write_response_done, on_write_response_done,
                    conn);
  grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn);
  grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn);
  grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn);
  grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn);
  grpc_slice_buffer_init(&conn->client_read_buffer);
  grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
  grpc_slice_buffer_init(&conn->client_write_buffer);
  grpc_slice_buffer_init(&conn->server_read_buffer);
  grpc_slice_buffer_init(&conn->server_deferred_write_buffer);
  grpc_slice_buffer_init(&conn->server_write_buffer);
  grpc_http_parser_init(&conn->http_parser, GRPC_HTTP_REQUEST,
                        &conn->http_request);
  grpc_endpoint_read(exec_ctx, conn->client_endpoint, &conn->client_read_buffer,
                     &conn->on_read_request_done);
}
Example #15
0
static void half_init(half *m, passthru_endpoint *parent,
                      grpc_resource_quota *resource_quota,
                      const char *half_name) {
  m->base.vtable = &vtable;
  m->parent = parent;
  grpc_slice_buffer_init(&m->read_buffer);
  m->on_read = NULL;
  char *name;
  gpr_asprintf(&name, "passthru_endpoint_%s_%" PRIxPTR, half_name,
               (intptr_t)parent);
  m->resource_user = grpc_resource_user_create(resource_quota, name);
  gpr_free(name);
}
Example #16
0
grpc_byte_buffer *grpc_raw_byte_buffer_from_reader(
    grpc_byte_buffer_reader *reader) {
  grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
  grpc_slice slice;
  bb->type = GRPC_BB_RAW;
  bb->data.raw.compression = GRPC_COMPRESS_NONE;
  grpc_slice_buffer_init(&bb->data.raw.slice_buffer);

  while (grpc_byte_buffer_reader_next(reader, &slice)) {
    grpc_slice_buffer_add(&bb->data.raw.slice_buffer, slice);
  }
  return bb;
}
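A short sketch of the usual pairing of the reader with this constructor: duplicating a buffer by replaying its slices (helper name hypothetical, `grpc_byte_buffer_reader_destroy` assumed from the public API).
/* Sketch: copy a byte buffer by feeding its slices back through a reader. */
static grpc_byte_buffer *copy_byte_buffer(grpc_byte_buffer *src) {
  grpc_byte_buffer_reader reader;
  if (!grpc_byte_buffer_reader_init(&reader, src)) return NULL;
  grpc_byte_buffer *copy = grpc_raw_byte_buffer_from_reader(&reader);
  grpc_byte_buffer_reader_destroy(&reader);
  return copy;
}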
Example #17
0
grpc_endpoint *grpc_secure_endpoint_create(
    struct tsi_frame_protector *protector, grpc_endpoint *transport,
    grpc_slice *leftover_slices, size_t leftover_nslices) {
  size_t i;
  secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
  ep->base.vtable = &vtable;
  ep->wrapped_ep = transport;
  ep->protector = protector;
  grpc_slice_buffer_init(&ep->leftover_bytes);
  for (i = 0; i < leftover_nslices; i++) {
    grpc_slice_buffer_add(&ep->leftover_bytes,
                          grpc_slice_ref(leftover_slices[i]));
  }
  ep->write_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
  ep->read_staging_buffer = grpc_slice_malloc(STAGING_BUFFER_SIZE);
  grpc_slice_buffer_init(&ep->output_buffer);
  grpc_slice_buffer_init(&ep->source_buffer);
  ep->read_buffer = NULL;
  grpc_closure_init(&ep->on_read, on_read, ep);
  gpr_mu_init(&ep->protector_mu);
  gpr_ref_init(&ep->ref, 1);
  return &ep->base;
}
Example #18
0
grpc_byte_buffer *grpc_raw_compressed_byte_buffer_create(
    grpc_slice *slices, size_t nslices,
    grpc_compression_algorithm compression) {
  size_t i;
  grpc_byte_buffer *bb = gpr_malloc(sizeof(grpc_byte_buffer));
  bb->type = GRPC_BB_RAW;
  bb->data.raw.compression = compression;
  grpc_slice_buffer_init(&bb->data.raw.slice_buffer);
  for (i = 0; i < nslices; i++) {
    grpc_slice_ref_internal(slices[i]);
    grpc_slice_buffer_add(&bb->data.raw.slice_buffer, slices[i]);
  }
  return bb;
}
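A hedged usage sketch: the payload is assumed to be compressed already by the caller; the byte buffer only records which algorithm was used.
/* Sketch: tag a single pre-compressed slice with its algorithm.
   gz_bytes/gz_len are placeholders for gzip-compressed data. */
grpc_slice s = grpc_slice_from_copied_buffer(gz_bytes, gz_len);
grpc_byte_buffer *bb =
    grpc_raw_compressed_byte_buffer_create(&s, 1, GRPC_COMPRESS_GZIP);
grpc_slice_unref(s); /* the buffer holds its own reference to the slice */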
Example #19
0
grpc_endpoint *grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
                                         grpc_resource_quota *resource_quota) {
  grpc_mock_endpoint *m = gpr_malloc(sizeof(*m));
  m->base.vtable = &vtable;
  char *name;
  gpr_asprintf(&name, "mock_endpoint_%" PRIxPTR, (intptr_t)m);
  m->resource_user = grpc_resource_user_create(resource_quota, name);
  gpr_free(name);
  grpc_slice_buffer_init(&m->read_buffer);
  gpr_mu_init(&m->mu);
  m->on_write = on_write;
  m->on_read = NULL;
  return &m->base;
}
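A minimal construction sketch (callback and helper names hypothetical): a mock endpoint whose writes are simply discarded, handy for exercising read paths in isolation.
/* Sketch: writes to this endpoint are dropped on the floor. */
static void discard_write(grpc_slice slice) { grpc_slice_unref(slice); }

static grpc_endpoint *make_discarding_endpoint(grpc_resource_quota *quota) {
  return grpc_mock_endpoint_create(discard_write, quota);
}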
Example #20
0
/* Constructor for call_data */
static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
                                  grpc_call_element *elem,
                                  grpc_call_element_args *args) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;

  /* initialize members */
  grpc_slice_buffer_init(&calld->slices);
  calld->has_compression_algorithm = 0;
  grpc_closure_init(&calld->got_slice, got_slice, elem);
  grpc_closure_init(&calld->send_done, send_done, elem);

  return GRPC_ERROR_NONE;
}
Example #21
0
static grpc_handshaker* grpc_http_connect_handshaker_create() {
  http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
  memset(handshaker, 0, sizeof(*handshaker));
  grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
  gpr_mu_init(&handshaker->mu);
  gpr_ref_init(&handshaker->refcount, 1);
  grpc_slice_buffer_init(&handshaker->write_buffer);
  grpc_closure_init(&handshaker->request_done_closure, on_write_done,
                    handshaker, grpc_schedule_on_exec_ctx);
  grpc_closure_init(&handshaker->response_read_closure, on_read_done,
                    handshaker, grpc_schedule_on_exec_ctx);
  grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
                        &handshaker->http_response);
  return &handshaker->base;
}
Example #22
0
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("large_read_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
                       slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #23
0
static void finish_send_message(grpc_exec_ctx *exec_ctx,
                                grpc_call_element *elem) {
  call_data *calld = elem->call_data;
  int did_compress;
  grpc_slice_buffer tmp;
  grpc_slice_buffer_init(&tmp);
  did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
                                   &calld->slices, &tmp);
  if (did_compress) {
    if (GRPC_TRACER_ON(grpc_compression_trace)) {
      char *algo_name;
      const size_t before_size = calld->slices.length;
      const size_t after_size = tmp.length;
      const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
      GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
                                                 &algo_name));
      gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
                         " bytes (%.2f%% savings)",
              algo_name, before_size, after_size, 100 * savings_ratio);
    }
    grpc_slice_buffer_swap(&calld->slices, &tmp);
    calld->send_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
  } else {
    if (GRPC_TRACER_ON(grpc_compression_trace)) {
      char *algo_name;
      GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
                                                 &algo_name));
      gpr_log(GPR_DEBUG,
              "Algorithm '%s' enabled but decided not to compress. Input size: "
              "%" PRIuPTR,
              algo_name, calld->slices.length);
    }
  }

  grpc_slice_buffer_destroy_internal(exec_ctx, &tmp);

  grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
                                calld->send_flags);
  calld->send_op->payload->send_message.send_message =
      &calld->replacement_stream.base;
  calld->post_send = calld->send_op->on_complete;
  calld->send_op->on_complete = &calld->send_done;

  grpc_call_next_op(exec_ctx, elem, calld->send_op);
}
Example #24
0
static void test_negative_rq_free_pool(void) {
  gpr_log(GPR_INFO, "** test_negative_rq_free_pool **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_negative_rq_free_pool");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);

  grpc_slice_buffer buffer;
  grpc_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  grpc_resource_quota_resize(q, 512);

  double eps = 0.0001;
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) < 1 + eps);
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) > 1 - eps);

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_unref(&exec_ctx, usr);
    grpc_exec_ctx_finish(&exec_ctx);
  }

  grpc_resource_quota_unref(q);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #25
0
grpc_handshaker* grpc_http_connect_handshaker_create(const char* proxy_server,
                                                     const char* server_name) {
  GPR_ASSERT(proxy_server != NULL);
  GPR_ASSERT(server_name != NULL);
  http_connect_handshaker* handshaker = gpr_malloc(sizeof(*handshaker));
  memset(handshaker, 0, sizeof(*handshaker));
  grpc_handshaker_init(&http_connect_handshaker_vtable, &handshaker->base);
  gpr_mu_init(&handshaker->mu);
  gpr_ref_init(&handshaker->refcount, 1);
  handshaker->proxy_server = gpr_strdup(proxy_server);
  handshaker->server_name = gpr_strdup(server_name);
  grpc_slice_buffer_init(&handshaker->write_buffer);
  grpc_closure_init(&handshaker->request_done_closure, on_write_done,
                    handshaker);
  grpc_closure_init(&handshaker->response_read_closure, on_read_done,
                    handshaker);
  grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
                        &handshaker->http_response);
  return &handshaker->base;
}
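A creation sketch with placeholder host:port strings; the resulting handshaker would normally be registered with the connection's handshake manager.
/* Sketch: build the CONNECT handshaker for a connection routed via a proxy. */
grpc_handshaker *h = grpc_http_connect_handshaker_create(
    "proxy.example.com:3128" /* proxy_server */,
    "backend.example.com:443" /* server_name */);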
Example #26
0
static grpc_resolver *sockaddr_create(grpc_exec_ctx *exec_ctx,
                                      grpc_resolver_args *args,
                                      bool parse(const grpc_uri *uri,
                                                 grpc_resolved_address *dst)) {
  if (0 != strcmp(args->uri->authority, "")) {
    gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
            args->uri->scheme);
    return NULL;
  }
  /* Construct addresses. */
  grpc_slice path_slice =
      grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
  grpc_slice_buffer path_parts;
  grpc_slice_buffer_init(&path_parts);
  grpc_slice_split(path_slice, ",", &path_parts);
  grpc_lb_addresses *addresses =
      grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
  bool errors_found = false;
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    grpc_uri ith_uri = *args->uri;
    char *part_str = grpc_slice_to_c_string(path_parts.slices[i]);
    ith_uri.path = part_str;
    if (!parse(&ith_uri, &addresses->addresses[i].address)) {
      errors_found = true; /* GPR_TRUE */
    }
    gpr_free(part_str);
    if (errors_found) break;
  }
  grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
  grpc_slice_unref_internal(exec_ctx, path_slice);
  if (errors_found) {
    grpc_lb_addresses_destroy(exec_ctx, addresses);
    return NULL;
  }
  /* Instantiate resolver. */
  sockaddr_resolver *r = gpr_zalloc(sizeof(sockaddr_resolver));
  r->addresses = addresses;
  r->channel_args = grpc_channel_args_copy(args->args);
  grpc_resolver_init(&r->base, &sockaddr_resolver_vtable, args->combiner);
  return &r->base;
}
Example #27
0
static void multiple_shutdown_test(grpc_endpoint_test_config config) {
  grpc_endpoint_test_fixture f =
      begin_test(config, "multiple_shutdown_test", 128);
  int fail_count = 0;

  grpc_slice_buffer slice_buffer;
  grpc_slice_buffer_init(&slice_buffer);

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                         grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 0);
  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  wait_for_fail_count(&exec_ctx, &fail_count, 1);
  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
                     GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                         grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 2);
  grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
  grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
                      GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
  wait_for_fail_count(&exec_ctx, &fail_count, 3);
  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
                         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
  wait_for_fail_count(&exec_ctx, &fail_count, 3);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &slice_buffer);

  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #28
0
/* Do a read_test, then release fd and try to read/write again. Verify that
   grpc_tcp_fd() is available before the fd is released. */
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure fd_released_cb;
  int fd_released_done = 0;
  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
                    grpc_schedule_on_exec_ctx);

  gpr_log(GPR_INFO,
          "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("release_fd_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
                       slice_size, "test");
  GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
            state.read_bytes, state.target_read_bytes);
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_mu_lock(g_mu);
  while (!fd_released_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
  }
  gpr_mu_unlock(g_mu);
  GPR_ASSERT(fd_released_done == 1);
  GPR_ASSERT(fd == sv[1]);
  grpc_exec_ctx_finish(&exec_ctx);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
}
Example #29
0
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                         grpc_error* error) {
  http_connect_handshaker* handshaker = arg;
  gpr_mu_lock(&handshaker->mu);
  if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
    // If the read failed or we're shutting down, clean up and invoke the
    // callback with the error.
    handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
    goto done;
  }
  // Add buffer to parser.
  for (size_t i = 0; i < handshaker->args->read_buffer->count; ++i) {
    if (GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i]) > 0) {
      size_t body_start_offset = 0;
      error = grpc_http_parser_parse(&handshaker->http_parser,
                                     handshaker->args->read_buffer->slices[i],
                                     &body_start_offset);
      if (error != GRPC_ERROR_NONE) {
        handshake_failed_locked(exec_ctx, handshaker, error);
        goto done;
      }
      if (handshaker->http_parser.state == GRPC_HTTP_BODY) {
        // Remove the data we've already read from the read buffer,
        // leaving only the leftover bytes (if any).
        grpc_slice_buffer tmp_buffer;
        grpc_slice_buffer_init(&tmp_buffer);
        if (body_start_offset <
            GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i])) {
          grpc_slice_buffer_add(
              &tmp_buffer,
              grpc_slice_split_tail(&handshaker->args->read_buffer->slices[i],
                                    body_start_offset));
        }
        grpc_slice_buffer_addn(&tmp_buffer,
                               &handshaker->args->read_buffer->slices[i + 1],
                               handshaker->args->read_buffer->count - i - 1);
        grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer);
        grpc_slice_buffer_destroy_internal(exec_ctx, &tmp_buffer);
        break;
      }
    }
  }
  // If we're not done reading the response, read more data.
  // TODO(roth): In practice, I suspect that the response to a CONNECT
  // request will never include a body, in which case this check is
  // sufficient.  However, the language of RFC-2817 doesn't explicitly
  // forbid the response from including a body.  If there is a body,
  // it's possible that we might have parsed part but not all of the
  // body, in which case this check will cause us to fail to parse the
  // remainder of the body.  If that ever becomes an issue, we may
  // need to fix the HTTP parser to understand when the body is
  // complete (e.g., handling chunked transfer encoding or looking
  // at the Content-Length: header).
  if (handshaker->http_parser.state != GRPC_HTTP_BODY) {
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
                                               handshaker->args->read_buffer);
    grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
                       handshaker->args->read_buffer,
                       &handshaker->response_read_closure);
    gpr_mu_unlock(&handshaker->mu);
    return;
  }
  // Make sure we got a 2xx response.
  if (handshaker->http_response.status < 200 ||
      handshaker->http_response.status >= 300) {
    char* msg;
    gpr_asprintf(&msg, "HTTP proxy returned response code %d",
                 handshaker->http_response.status);
    error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
    gpr_free(msg);
    handshake_failed_locked(exec_ctx, handshaker, error);
    goto done;
  }
  // Success.  Invoke handshake-done callback.
  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
done:
  // Set shutdown to true so that subsequent calls to
  // http_connect_handshaker_shutdown() do nothing.
  handshaker->shutdown = true;
  gpr_mu_unlock(&handshaker->mu);
  http_connect_handshaker_unref(exec_ctx, handshaker);
}
Example #30
0
static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx,
                                           grpc_resolver_factory* factory,
                                           grpc_resolver_args* args) {
  if (0 != strcmp(args->uri->authority, "")) {
    gpr_log(GPR_ERROR, "authority based uri's not supported by the %s scheme",
            args->uri->scheme);
    return NULL;
  }
  // Get lb_enabled arg.  Anything other than "0" is interpreted as true.
  const char* lb_enabled_qpart =
      grpc_uri_get_query_arg(args->uri, "lb_enabled");
  const bool lb_enabled =
      lb_enabled_qpart != NULL && strcmp("0", lb_enabled_qpart) != 0;

  // Get the balancer's names.
  const char* balancer_names =
      grpc_uri_get_query_arg(args->uri, "balancer_names");
  grpc_slice_buffer balancer_names_parts;
  grpc_slice_buffer_init(&balancer_names_parts);
  if (balancer_names != NULL) {
    const grpc_slice balancer_names_slice =
        grpc_slice_from_copied_string(balancer_names);
    grpc_slice_split(balancer_names_slice, ",", &balancer_names_parts);
    grpc_slice_unref(balancer_names_slice);
  }

  // Construct addresses.
  grpc_slice path_slice =
      grpc_slice_new(args->uri->path, strlen(args->uri->path), do_nothing);
  grpc_slice_buffer path_parts;
  grpc_slice_buffer_init(&path_parts);
  grpc_slice_split(path_slice, ",", &path_parts);
  if (balancer_names_parts.count > 0 &&
      path_parts.count != balancer_names_parts.count) {
    gpr_log(GPR_ERROR,
            "Balancer names present but mismatched with number of addresses: "
            "%lu balancer names != %lu addresses",
            (unsigned long)balancer_names_parts.count,
            (unsigned long)path_parts.count);
    /* Clean up the split buffers before bailing out. */
    grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
    grpc_slice_buffer_destroy_internal(exec_ctx, &balancer_names_parts);
    grpc_slice_unref(path_slice);
    return NULL;
  }
  grpc_lb_addresses* addresses =
      grpc_lb_addresses_create(path_parts.count, NULL /* user_data_vtable */);
  bool errors_found = false;
  for (size_t i = 0; i < addresses->num_addresses; i++) {
    grpc_uri ith_uri = *args->uri;
    char* part_str = grpc_slice_to_c_string(path_parts.slices[i]);
    ith_uri.path = part_str;
    if (!parse_ipv4(&ith_uri, &addresses->addresses[i].address)) {
      errors_found = true;
    }
    gpr_free(part_str);
    if (errors_found) break;
    addresses->addresses[i].is_balancer = lb_enabled;
    addresses->addresses[i].balancer_name =
        balancer_names_parts.count > 0
            ? grpc_dump_slice(balancer_names_parts.slices[i], GPR_DUMP_ASCII)
            : NULL;
  }
  grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
  grpc_slice_buffer_destroy_internal(exec_ctx, &balancer_names_parts);
  grpc_slice_unref(path_slice);
  if (errors_found) {
    grpc_lb_addresses_destroy(exec_ctx, addresses);
    return NULL;
  }
  // Instantiate resolver.
  fake_resolver* r = gpr_malloc(sizeof(fake_resolver));
  memset(r, 0, sizeof(*r));
  r->channel_args = grpc_channel_args_copy(args->args);
  r->addresses = addresses;
  gpr_mu_init(&r->mu);
  grpc_resolver_init(&r->base, &fake_resolver_vtable);
  return &r->base;
}