Example #1
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_subchannel_call_holder *holder = arg;
  grpc_subchannel_call *call;
  gpr_mu_lock(&holder->mu);
  GPR_ASSERT(holder->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  call = GET_CALL(holder);
  GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
  if (holder->subchannel == NULL) {
    holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
    fail_locked(exec_ctx, holder);
  } else {
    grpc_closure_init(&holder->next_step, call_ready, holder);
    if (grpc_subchannel_create_call(exec_ctx, holder->subchannel,
                                    holder->pollset, &holder->subchannel_call,
                                    &holder->next_step)) {
      holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      /* got one immediately - continue the op (and any waiting ops) */
      retry_waiting_locked(exec_ctx, holder);
    }
  }
  gpr_mu_unlock(&holder->mu);
}
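Note: GET_CALL and CANCELLED_CALL are defined outside this snippet. In the subchannel call holder code of this vintage they amount to an atomic load plus a sentinel pointer; a minimal sketch under that assumption (not verbatim source):

/* Sketch (assumed): the holder publishes the call pointer via a gpr_atm so
   fast-path readers can fetch it without taking holder->mu; the address 1 is
   a sentinel that can never be a real allocation. */
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
#define GET_CALL(holder) \
  ((grpc_subchannel_call *)gpr_atm_acq_load(&(holder)->subchannel_call))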
Example #2
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           grpc_connected_subchannel **target) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if (pp->target == target) {
      grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
                                   pp->pollset);
      *target = NULL;
      grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
}
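The loop above detaches the whole pending list, completes any pick whose target matches, and re-links the rest. For reference, a plausible shape for pending_pick, reconstructed from the fields accessed here (names assumed):

/* Assumed layout, inferred from the accesses above. */
typedef struct pending_pick {
  struct pending_pick *next;           /* intrusive singly-linked list */
  grpc_pollset *pollset;               /* pollset driving this pick's I/O */
  grpc_connected_subchannel **target;  /* where the picked subchannel goes */
  grpc_closure *on_complete;           /* notified when the pick resolves */
} pending_pick;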
Example #3
void grpc_fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_closure *on_done,
                    int *release_fd, const char *reason) {
  fd->on_done_closure = on_done;
  fd->released = release_fd != NULL;
  if (!fd->released) {
    shutdown(fd->fd, SHUT_RDWR);
  } else {
    *release_fd = fd->fd;
  }
  gpr_mu_lock(&fd->mu);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  if (!has_watchers(fd)) {
    fd->closed = 1;
    if (!fd->released) {
      close(fd->fd);
    }
    grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
  } else {
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
Example #4
void grpc_tcp_server_start(grpc_tcp_server *s, grpc_pollset **pollsets,
                           size_t pollset_count, grpc_tcp_server_cb cb,
                           void *cb_arg) {
  size_t i, j;
  GPR_ASSERT(cb);
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(!s->cb);
  GPR_ASSERT(s->active_ports == 0);
  s->cb = cb;
  s->cb_arg = cb_arg;
  s->pollsets = pollsets;
  s->pollset_count = pollset_count;
  for (i = 0; i < s->nports; i++) {
    for (j = 0; j < pollset_count; j++) {
      grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
    }
    s->ports[i].read_closure.cb = on_read;
    s->ports[i].read_closure.cb_arg = &s->ports[i];
    grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
    s->active_ports++;
  }
  gpr_mu_unlock(&s->mu);
}
Example #5
/* Get stats from input stats store */
static void get_stats(census_ht *store, census_aggregated_rpc_stats *data) {
  GPR_ASSERT(data != NULL);
  if (data->num_entries != 0) {
    census_aggregated_rpc_stats_set_empty(data);
  }
  gpr_mu_lock(&g_mu);
  if (store != NULL) {
    size_t n;
    unsigned i, j;
    gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
    census_ht_kv *kv = census_ht_get_all_elements(store, &n);
    if (kv != NULL) {
      data->num_entries = n;
      data->stats =
          (per_method_stats *)gpr_malloc(sizeof(per_method_stats) * n);
      for (i = 0; i < n; i++) {
        census_window_stats_sums sums[NUM_INTERVALS];
        for (j = 0; j < NUM_INTERVALS; j++) {
          sums[j].statistic = (void *)census_rpc_stats_create_empty();
        }
        data->stats[i].method = gpr_strdup(kv[i].k.ptr);
        census_window_stats_get_sums(kv[i].v, now, sums);
        data->stats[i].minute_stats =
            *(census_rpc_stats *)sums[MINUTE_INTERVAL].statistic;
        data->stats[i].hour_stats =
            *(census_rpc_stats *)sums[HOUR_INTERVAL].statistic;
        data->stats[i].total_stats =
            *(census_rpc_stats *)sums[TOTAL_INTERVAL].statistic;
        for (j = 0; j < NUM_INTERVALS; j++) {
          gpr_free(sums[j].statistic);
        }
      }
      gpr_free(kv);
    }
  }
  gpr_mu_unlock(&g_mu);
}
Example #6
void *census_log_start_write(size_t size) {
  /* Used to bound number of times block allocation is attempted. */
  gpr_int32 attempts_remaining = g_log.num_blocks;
  /* TODO(aveitch): move this inside the do loop when current_cpu is fixed */
  gpr_int32 core_id = gpr_cpu_current_cpu();
  GPR_ASSERT(g_log.initialized);
  if (size > CENSUS_LOG_MAX_RECORD_SIZE) {
    return NULL;
  }
  do {
    int allocated;
    void *record = NULL;
    cl_block *block =
        cl_core_local_block_get_block(&g_log.core_local_blocks[core_id]);
    if (block && (record = cl_block_start_write(block, size))) {
      return record;
    }
    /* Need to allocate a new block. We are here if:
       - No block associated with the core OR
       - Write in-progress on the block OR
       - block is out of space */
    if (gpr_atm_acq_load(&g_log.is_full)) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
    gpr_mu_lock(&g_log.lock);
    allocated = cl_allocate_core_local_block(core_id, block);
    gpr_mu_unlock(&g_log.lock);
    if (!allocated) {
      gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
      return NULL;
    }
  } while (attempts_remaining--);
  /* Give up. */
  gpr_atm_no_barrier_fetch_add(&g_log.out_of_space_count, 1);
  return NULL;
}
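Callers pair census_log_start_write with a matching end-write call; a minimal usage sketch follows (the exact census_log_end_write signature and the my_stats variable are assumptions for illustration):

/* Sketch: reserve space, fill it, then commit the bytes actually written. */
void *record = census_log_start_write(sizeof(my_stats));
if (record != NULL) {
  memcpy(record, &my_stats, sizeof(my_stats));
  census_log_end_write(record, sizeof(my_stats));
} else {
  /* NULL means the log was out of space; out_of_space_count was bumped. */
}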
Example #7
int main(int argc, char **argv) {
  synchronizer sync;
  grpc_jwt_verifier *verifier;
  gpr_cmdline *cl;
  char *jwt = NULL;
  char *aud = NULL;

  cl = gpr_cmdline_create("JWT verifier tool");
  gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt);
  gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud);
  gpr_cmdline_parse(cl, argc, argv);
  if (jwt == NULL || aud == NULL) {
    print_usage_and_exit(cl, argv[0]);
  }

  verifier = grpc_jwt_verifier_create(NULL, 0);

  grpc_init();

  grpc_pollset_init(&sync.pollset);
  sync.is_done = 0;

  grpc_jwt_verifier_verify(verifier, &sync.pollset, jwt, aud,
                           on_jwt_verification_done, &sync);

  gpr_mu_lock(GRPC_POLLSET_MU(&sync.pollset));
  while (!sync.is_done) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&sync.pollset, &worker,
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&sync.pollset));

  grpc_jwt_verifier_destroy(verifier);
  gpr_cmdline_destroy(cl);
  return !sync.success;
}
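The synchronizer type and on_jwt_verification_done callback live elsewhere in this tool; they amount to a pollset plus done/success flags, roughly as follows (a sketch, not the verbatim source):

typedef struct {
  grpc_pollset pollset;
  int is_done;
  int success;
} synchronizer;

/* Assumed callback shape: record the outcome, then kick the pollset so the
   grpc_pollset_work loop in main wakes up and observes is_done. */
static void on_jwt_verification_done(void *user_data,
                                     grpc_jwt_verifier_status status,
                                     grpc_jwt_claims *claims) {
  synchronizer *sync = user_data;
  sync->success = (status == GRPC_JWT_VERIFIER_OK);
  if (claims != NULL) grpc_jwt_claims_destroy(claims);
  gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
  sync->is_done = 1;
  grpc_pollset_kick(&sync->pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
}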
Example #8
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
  async_connect *ac = acp;
  grpc_endpoint **ep = ac->endpoint;
  GPR_ASSERT(*ep == NULL);
  grpc_closure *on_done = ac->on_done;

  GRPC_ERROR_REF(error);

  gpr_mu_lock(&ac->mu);
  grpc_winsocket *socket = ac->socket;
  ac->socket = NULL;
  gpr_mu_unlock(&ac->mu);

  grpc_timer_cancel(exec_ctx, &ac->alarm);

  gpr_mu_lock(&ac->mu);

  if (error == GRPC_ERROR_NONE && socket != NULL) {
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success =
        WSAGetOverlappedResult(socket->socket, &socket->write_info.overlapped,
                               &transfered_bytes, FALSE, &flags);
    GPR_ASSERT(transfered_bytes == 0);
    if (!wsa_success) {
      error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
    } else {
      *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
      socket = NULL;
    }
  }

  async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
  /* If the connection was aborted, the callback was already called when
     the deadline was met. */
  grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
}
Example #9
static void destroy_stream(grpc_transport *gt, grpc_stream *gs) {
  grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
  grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
  int i;

  gpr_mu_lock(&t->mu);

  GPR_ASSERT(s->global.published_state == GRPC_STREAM_CLOSED ||
             s->global.id == 0);
  GPR_ASSERT(!s->global.in_stream_map);
  grpc_chttp2_unregister_stream(t, s);
  if (!t->parsing_active && s->global.id) {
    GPR_ASSERT(grpc_chttp2_stream_map_find(&t->parsing_stream_map,
                                           s->global.id) == NULL);
  }

  grpc_chttp2_list_remove_incoming_window_updated(&t->global, &s->global);
  grpc_chttp2_list_remove_writable_window_update_stream(&t->global, &s->global);

  gpr_mu_unlock(&t->mu);

  for (i = 0; i < STREAM_LIST_COUNT; i++) {
    GPR_ASSERT(!s->included[i]);
  }

  GPR_ASSERT(s->global.outgoing_sopb == NULL);
  GPR_ASSERT(s->global.publish_sopb == NULL);
  grpc_sopb_destroy(&s->writing.sopb);
  grpc_sopb_destroy(&s->global.incoming_sopb);
  grpc_chttp2_data_parser_destroy(&s->parsing.data_parser);
  grpc_chttp2_incoming_metadata_buffer_destroy(&s->parsing.incoming_metadata);
  grpc_chttp2_incoming_metadata_buffer_destroy(&s->global.incoming_metadata);
  grpc_chttp2_incoming_metadata_live_op_buffer_end(
      &s->global.outstanding_metadata);

  UNREF_TRANSPORT(t, "stream");
}
Example #10
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
                                     grpc_security_status status,
                                     grpc_endpoint *secure_endpoint,
                                     grpc_auth_context *auth_context) {
  server_secure_connect *state = statep;
  grpc_transport *transport;
  if (status == GRPC_SECURITY_OK) {
    if (secure_endpoint) {
      gpr_mu_lock(&state->state->mu);
      if (!state->state->is_shutdown) {
        transport = grpc_create_chttp2_transport(
            exec_ctx, grpc_server_get_channel_args(state->state->server),
            secure_endpoint, 0);
        grpc_channel_args *args_copy;
        grpc_arg args_to_add[2];
        args_to_add[0] = grpc_server_credentials_to_arg(state->state->creds);
        args_to_add[1] = grpc_auth_context_to_arg(auth_context);
        args_copy = grpc_channel_args_copy_and_add(
            grpc_server_get_channel_args(state->state->server), args_to_add,
            GPR_ARRAY_SIZE(args_to_add));
        grpc_server_setup_transport(exec_ctx, state->state->server, transport,
                                    state->accepting_pollset, args_copy);
        grpc_channel_args_destroy(args_copy);
        grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
      } else {
        /* We need to consume this here, because the server may already have
         * gone away. */
        grpc_endpoint_destroy(exec_ctx, secure_endpoint);
      }
      gpr_mu_unlock(&state->state->mu);
    }
  } else {
    gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
  }
  state_unref(state->state);
  gpr_free(state);
}
Example #11
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
  unsigned char *buf = gpr_malloc(read_size);
  ssize_t bytes_read;
  size_t bytes_left = num_bytes;
  int flags;
  int current = 0;
  int i;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0);

  for (;;) {
    grpc_pollset_worker worker;
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    do {
      bytes_read =
          read(fd, buf, bytes_left > read_size ? read_size : bytes_left);
    } while (bytes_read < 0 && errno == EINTR);
    GPR_ASSERT(bytes_read >= 0);
    for (i = 0; i < bytes_read; ++i) {
      GPR_ASSERT(buf[i] == current);
      current = (current + 1) % 256;
    }
    bytes_left -= (size_t)bytes_read;
    if (bytes_left == 0) break;
  }
  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);

  gpr_free(buf);
}
Example #12
static void on_oauth2_token_fetcher_http_response(
    void *user_data, const grpc_httpcli_response *response) {
  grpc_credentials_metadata_request *r =
      (grpc_credentials_metadata_request *)user_data;
  grpc_oauth2_token_fetcher_credentials *c =
      (grpc_oauth2_token_fetcher_credentials *)r->creds;
  gpr_timespec token_lifetime;
  grpc_credentials_status status;

  gpr_mu_lock(&c->mu);
  status = grpc_oauth2_token_fetcher_credentials_parse_server_response(
      response, &c->access_token_md, &token_lifetime);
  if (status == GRPC_CREDENTIALS_OK) {
    c->token_expiration =
        gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), token_lifetime);
    r->cb(r->user_data, c->access_token_md->entries,
          c->access_token_md->num_entries, status);
  } else {
    c->token_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
    r->cb(r->user_data, NULL, 0, status);
  }
  gpr_mu_unlock(&c->mu);
  grpc_credentials_metadata_request_destroy(r);
}
Example #13
/* Public function. Stops and destroys a grpc_tcp_server. */
void grpc_tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
                             grpc_closure *shutdown_complete) {
  size_t i;
  int immediately_done = 0;
  gpr_mu_lock(&s->mu);

  s->shutdown_complete = shutdown_complete;

  /* First, shutdown all fd's. This will queue abortion calls for all
     of the pending accepts due to the normal operation mechanism. */
  if (s->active_ports == 0) {
    immediately_done = 1;
  }
  for (i = 0; i < s->nports; i++) {
    server_port *sp = &s->ports[i];
    sp->shutting_down = 1;
    grpc_winsocket_shutdown(sp->socket);
  }
  gpr_mu_unlock(&s->mu);

  if (immediately_done) {
    finish_shutdown(exec_ctx, s);
  }
}
Example #14
void grpc_tcp_server_destroy(grpc_tcp_server *s) {
  size_t i;
  gpr_mu_lock(&s->mu);
  /* shutdown all fd's */
  for (i = 0; i < s->nports; i++) {
    grpc_fd_shutdown(s->ports[i].emfd);
  }
  /* wait while that happens */
  while (s->active_ports) {
    gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
  }
  gpr_mu_unlock(&s->mu);

  /* delete ALL the things */
  for (i = 0; i < s->nports; i++) {
    server_port *sp = &s->ports[i];
    if (sp->addr.sockaddr.sa_family == AF_UNIX) {
      unlink_if_unix_domain_socket(&sp->addr.un);
    }
    grpc_fd_orphan(sp->emfd, NULL, NULL);
  }
  gpr_free(s->ports);
  gpr_free(s);
}
Example #15
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
                                 const char *host, void *reserved) {
  registered_call *rc = gpr_malloc(sizeof(registered_call));
  GRPC_API_TRACE(
      "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
      4, (channel, method, host, reserved));
  GPR_ASSERT(!reserved);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  rc->path = grpc_mdelem_from_slices(
      &exec_ctx, GRPC_MDSTR_PATH,
      grpc_slice_intern(grpc_slice_from_static_string(method)));
  rc->authority =
      host ? grpc_mdelem_from_slices(
                 &exec_ctx, GRPC_MDSTR_AUTHORITY,
                 grpc_slice_intern(grpc_slice_from_static_string(host)))
           : GRPC_MDNULL;
  gpr_mu_lock(&channel->registered_call_mu);
  rc->next = channel->registered_calls;
  channel->registered_calls = rc;
  gpr_mu_unlock(&channel->registered_call_mu);
  grpc_exec_ctx_finish(&exec_ctx);
  return rc;
}
Example #16
static void dns_on_resolved(void *arg, grpc_resolved_addresses *addresses) {
  dns_resolver *r = arg;
  grpc_client_config *config = NULL;
  grpc_subchannel **subchannels;
  grpc_subchannel_args args;
  grpc_lb_policy *lb_policy;
  size_t i;
  if (addresses) {
    config = grpc_client_config_create();
    subchannels = gpr_malloc(sizeof(grpc_subchannel *) * addresses->naddrs);
    for (i = 0; i < addresses->naddrs; i++) {
      memset(&args, 0, sizeof(args));
      args.addr = (struct sockaddr *)(addresses->addrs[i].addr);
      args.addr_len = addresses->addrs[i].len;
      subchannels[i] = grpc_subchannel_factory_create_subchannel(
          r->subchannel_factory, &args);
    }
    lb_policy = r->lb_policy_factory(subchannels, addresses->naddrs);
    grpc_client_config_set_lb_policy(config, lb_policy);
    GRPC_LB_POLICY_UNREF(lb_policy, "construction");
    grpc_resolved_addresses_destroy(addresses);
    gpr_free(subchannels);
  }
  gpr_mu_lock(&r->mu);
  GPR_ASSERT(r->resolving);
  r->resolving = 0;
  if (r->resolved_config) {
    grpc_client_config_unref(r->resolved_config);
  }
  r->resolved_config = config;
  r->resolved_version++;
  dns_maybe_finish_next_locked(r);
  gpr_mu_unlock(&r->mu);

  GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
}
Example #17
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                  grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {
  size_t i, j;
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
    pollset_set->pollset_capacity =
        GPR_MAX(8, 2 * pollset_set->pollset_capacity);
    pollset_set->pollsets =
        gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
                                               sizeof(*pollset_set->pollsets));
  }
  pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
  for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
    if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
      GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
    } else {
      grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
      pollset_set->fds[j++] = pollset_set->fds[i];
    }
  }
  pollset_set->fd_count = j;
  gpr_mu_unlock(&pollset_set->mu);
}
Example #18
static void unlock(grpc_chttp2_transport *t) {
  grpc_iomgr_closure *run_closures;

  unlock_check_read_write_state(t);
  if (!t->writing_active && !t->closed &&
      grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
    t->writing_active = 1;
    REF_TRANSPORT(t, "writing");
    grpc_chttp2_schedule_closure(&t->global, &t->writing_action, 1);
    prevent_endpoint_shutdown(t);
  }

  run_closures = t->global.pending_closures_head;
  t->global.pending_closures_head = NULL;
  t->global.pending_closures_tail = NULL;

  gpr_mu_unlock(&t->mu);

  while (run_closures) {
    grpc_iomgr_closure *next = run_closures->next;
    run_closures->cb(run_closures->cb_arg, run_closures->success);
    run_closures = next;
  }
}
Example #19
static void test_flush(void) {
  grpc_closure c;
  int done = 0;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_workqueue *wq = grpc_workqueue_create(&exec_ctx);
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
  grpc_pollset_worker worker;
  grpc_closure_init(&c, must_succeed, &done);

  grpc_exec_ctx_enqueue(&exec_ctx, &c, 1);
  grpc_workqueue_flush(&exec_ctx, wq);
  grpc_workqueue_add_to_pollset(&exec_ctx, wq, &g_pollset);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  GPR_ASSERT(!done);
  grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                    gpr_now(deadline.clock_type), deadline);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
  grpc_exec_ctx_finish(&exec_ctx);
  GPR_ASSERT(done);

  GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy");
  grpc_exec_ctx_finish(&exec_ctx);
}
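must_succeed is the test's completion callback; given the closure signature of this era (callback arg plus an int success flag), it presumably just asserts success and flips the flag the test waits on (an assumed sketch):

/* Assumed test callback: the closure must run successfully, and `done`
   (passed as the closure arg) is set so the pollset loop can exit. */
static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, int success) {
  GPR_ASSERT(success);
  *(int *)p = 1;
}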
Example #20
static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                            uint32_t initial_metadata_flags_mask,
                            uint32_t initial_metadata_flags_eq) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
                                   pp->pollset);
      grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, false, NULL);
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
}
Example #21
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = (grpc_tcp *)tcpp;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->write_info;
  grpc_closure *cb;

  GRPC_ERROR_REF(error);

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  gpr_mu_unlock(&tcp->mu);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0) {
      error = GRPC_WSA_ERROR(info->wsa_error, "WSASend");
    } else {
      GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length);
    }
  }

  TCP_UNREF(exec_ctx, tcp, "write");
  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
Example #22
void grpc_network_status_unregister_endpoint(grpc_endpoint *ep) {
  gpr_mu_lock(&g_endpoint_mutex);
  GPR_ASSERT(head);
  bool found = false;
  endpoint_ll_node *prev = head;
  // if we're unregistering the head, just move head to the next
  if (ep == head->ep) {
    head = head->next;
    gpr_free(prev);
    found = true;
  } else {
    for (endpoint_ll_node *curr = head->next; curr != NULL; curr = curr->next) {
      if (ep == curr->ep) {
        prev->next = curr->next;
        gpr_free(curr);
        found = true;
        break;
      }
      prev = curr;
    }
  }
  gpr_mu_unlock(&g_endpoint_mutex);
  GPR_ASSERT(found);
}
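head and endpoint_ll_node are file-scope state of the network status tracker; the declarations backing the list walk above look roughly like this (assumed sketch):

/* Assumed file-scope declarations, reconstructed from the usage above. */
typedef struct endpoint_ll_node {
  grpc_endpoint *ep;
  struct endpoint_ll_node *next;
} endpoint_ll_node;

static endpoint_ll_node *head = NULL;
static gpr_mu g_endpoint_mutex;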
Example #23
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user, size_t size,
                              grpc_closure *optional_on_done) {
  gpr_mu_lock(&resource_user->mu);
  ru_ref_by(resource_user, (gpr_atm)size);
  resource_user->free_pool -= (int64_t)size;
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
            resource_user->resource_quota->name, resource_user->name, size,
            resource_user->free_pool);
  }
  if (resource_user->free_pool < 0) {
    grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
                             GRPC_ERROR_NONE);
    if (!resource_user->allocating) {
      resource_user->allocating = true;
      grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
                         GRPC_ERROR_NONE);
    }
  } else {
    grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #24
static void unlock(grpc_chttp2_transport *t) {
  grpc_iomgr_closure *run_closures;

  unlock_check_read_write_state(t);
  if (!t->writing_active &&
      t->global.error_state == GRPC_CHTTP2_ERROR_STATE_NONE &&
      grpc_chttp2_unlocking_check_writes(&t->global, &t->writing)) {
    t->writing_active = 1;
    REF_TRANSPORT(t, "writing");
    grpc_chttp2_schedule_closure(&t->global, &t->writing_action, 1);
  }
  /* unlock_check_parser(t); */
  unlock_check_channel_callbacks(t);

  run_closures = t->global.pending_closures;
  t->global.pending_closures = NULL;

  gpr_mu_unlock(&t->mu);

  while (run_closures) {
    grpc_iomgr_closure *next = run_closures->next;
    run_closures->cb(run_closures->cb_arg, run_closures->success);
    run_closures = next;
  }
}
Example #25
/* tcp read callback */
static void recv_data(void *tp, gpr_slice *slices, size_t nslices,
                      grpc_endpoint_cb_status error) {
  grpc_chttp2_transport *t = tp;
  size_t i;
  int unref = 0;

  switch (error) {
    case GRPC_ENDPOINT_CB_SHUTDOWN:
    case GRPC_ENDPOINT_CB_EOF:
    case GRPC_ENDPOINT_CB_ERROR:
      lock(t);
      drop_connection(t);
      read_error_locked(t);
      unlock(t);
      unref = 1;
      for (i = 0; i < nslices; i++) gpr_slice_unref(slices[i]);
      break;
    case GRPC_ENDPOINT_CB_OK:
      lock(t);
      i = 0;
      GPR_ASSERT(!t->parsing_active);
      if (t->global.error_state == GRPC_CHTTP2_ERROR_STATE_NONE) {
        t->parsing_active = 1;
        /* merge stream lists */
        grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                         &t->parsing_stream_map);
        grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
        gpr_mu_unlock(&t->mu);
        for (; i < nslices && grpc_chttp2_perform_read(&t->parsing, slices[i]);
             i++) {
          gpr_slice_unref(slices[i]);
        }
        gpr_mu_lock(&t->mu);
        if (i != nslices) {
          drop_connection(t);
        }
        /* merge stream lists */
        grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                         &t->parsing_stream_map);
        t->global.concurrent_stream_count =
            grpc_chttp2_stream_map_size(&t->parsing_stream_map);
        if (t->parsing.initial_window_update != 0) {
          grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
                                          update_global_window, t);
        }
        /* handle higher level things */
        grpc_chttp2_publish_reads(&t->global, &t->parsing);
        t->parsing_active = 0;
      }
      if (i == nslices) {
        grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1);
      } else {
        read_error_locked(t);
        unref = 1;
      }
      unlock(t);
      for (; i < nslices; i++) gpr_slice_unref(slices[i]);
      break;
  }
  if (unref) {
    UNREF_TRANSPORT(t, "recv_data");
  }
}
Example #26
static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
                            grpc_metadata_batch *initial_metadata,
                            uint32_t initial_metadata_flags,
                            grpc_connected_subchannel **connected_subchannel,
                            grpc_closure *on_ready, grpc_error *error) {
  GPR_TIMER_BEGIN("pick_subchannel", 0);

  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu);
  if (initial_metadata == NULL) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel, GRPC_ERROR_REF(error));
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = closure->next_data.next) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_sched(
            exec_ctx, cpa->on_ready,
            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
      }
    }
    gpr_mu_unlock(&chand->mu);
    GPR_TIMER_END("pick_subchannel", 0);
    GRPC_ERROR_UNREF(error);
    return true;
  }
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
    gpr_mu_unlock(&chand->mu);
    // If the application explicitly set wait_for_ready, use that.
    // Otherwise, if the service config specified a value for this
    // method, use that.
    const bool wait_for_ready_set_from_api =
        initial_metadata_flags &
        GRPC_INITIAL_METADATA_WAIT_FOR_READY_EXPLICITLY_SET;
    const bool wait_for_ready_set_from_service_config =
        calld->wait_for_ready_from_service_config != WAIT_FOR_READY_UNSET;
    if (!wait_for_ready_set_from_api &&
        wait_for_ready_set_from_service_config) {
      if (calld->wait_for_ready_from_service_config == WAIT_FOR_READY_TRUE) {
        initial_metadata_flags |= GRPC_INITIAL_METADATA_WAIT_FOR_READY;
      } else {
        initial_metadata_flags &= ~GRPC_INITIAL_METADATA_WAIT_FOR_READY;
      }
    }
    // TODO(dgq): make this deadline configurable somehow.
    const grpc_lb_policy_pick_args inputs = {
        initial_metadata, initial_metadata_flags, &calld->lb_token_mdelem,
        gpr_inf_future(GPR_CLOCK_MONOTONIC)};
    const bool result = grpc_lb_policy_pick(
        exec_ctx, lb_policy, &inputs, connected_subchannel, NULL, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "pick_subchannel");
    GPR_TIMER_END("pick_subchannel", 0);
    return result;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = true;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                       &chand->on_resolver_result_changed);
  }
  if (chand->resolver != NULL) {
    cpa = gpr_malloc(sizeof(*cpa));
    cpa->initial_metadata = initial_metadata;
    cpa->initial_metadata_flags = initial_metadata_flags;
    cpa->connected_subchannel = connected_subchannel;
    cpa->on_ready = on_ready;
    cpa->elem = elem;
    grpc_closure_init(&cpa->closure, continue_picking, cpa);
    grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                             GRPC_ERROR_NONE);
  } else {
    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
                        NULL);
  }
  gpr_mu_unlock(&chand->mu);

  GPR_TIMER_END("pick_subchannel", 0);
  return false;
}
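continue_picking_args bundles the pick arguments so they can be replayed once the resolver returns a result; its fields follow directly from the cpa->... assignments near the end of the function above (field order assumed):

/* Layout inferred from the assignments in pick_subchannel. */
typedef struct continue_picking_args {
  grpc_call_element *elem;
  grpc_metadata_batch *initial_metadata;
  uint32_t initial_metadata_flags;
  grpc_connected_subchannel **connected_subchannel;
  grpc_closure *on_ready;
  grpc_closure closure; /* wraps continue_picking for the waiting list */
} continue_picking_args;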
Example #27
static void close_transport(grpc_transport *gt) {
  grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
  gpr_mu_lock(&t->mu);
  close_transport_locked(t);
  gpr_mu_unlock(&t->mu);
}
Example #28
static void init_transport(grpc_chttp2_transport *t,
                           grpc_transport_setup_callback setup, void *arg,
                           const grpc_channel_args *channel_args,
                           grpc_endpoint *ep, gpr_slice *slices, size_t nslices,
                           grpc_mdctx *mdctx, int is_client) {
  size_t i;
  int j;
  grpc_transport_setup_result sr;

  GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
             GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);

  memset(t, 0, sizeof(*t));

  t->base.vtable = &vtable;
  t->ep = ep;
  /* one ref is for destroy, the other for when ep becomes NULL */
  gpr_ref_init(&t->refs, 2);
  gpr_mu_init(&t->mu);
  grpc_mdctx_ref(mdctx);
  t->metadata_context = mdctx;
  t->endpoint_reading = 1;
  t->global.error_state = GRPC_CHTTP2_ERROR_STATE_NONE;
  t->global.next_stream_id = is_client ? 1 : 2;
  t->global.is_client = is_client;
  t->global.outgoing_window = DEFAULT_WINDOW;
  t->global.incoming_window = DEFAULT_WINDOW;
  t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
  t->global.ping_counter = 1;
  t->global.pings.next = t->global.pings.prev = &t->global.pings;
  t->parsing.is_client = is_client;
  t->parsing.str_grpc_timeout =
      grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
  t->parsing.deframe_state =
      is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
  t->writing.is_client = is_client;

  gpr_slice_buffer_init(&t->global.qbuf);

  gpr_slice_buffer_init(&t->writing.outbuf);
  grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx);
  grpc_iomgr_closure_init(&t->writing_action, writing_action, t);
  grpc_iomgr_closure_init(&t->reading_action, reading_action, t);

  gpr_slice_buffer_init(&t->parsing.qbuf);
  grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
  grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context);

  grpc_iomgr_closure_init(&t->channel_callback.notify_closed, notify_closed, t);
  if (is_client) {
    gpr_slice_buffer_add(
        &t->global.qbuf,
        gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
  }
  /* 8 is a random stab in the dark as to a good initial size: it's small enough
     that it shouldn't waste memory for infrequently used connections, yet
     large enough that the exponential growth should happen nicely when it's
     needed.
     TODO(ctiller): tune this */
  grpc_chttp2_stream_map_init(&t->parsing_stream_map, 8);
  grpc_chttp2_stream_map_init(&t->new_stream_map, 8);

  /* copy in initial settings to all setting sets */
  for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
    t->parsing.settings[i] = grpc_chttp2_settings_parameters[i].default_value;
    for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
      t->global.settings[j][i] =
          grpc_chttp2_settings_parameters[i].default_value;
    }
  }
  t->global.dirtied_local_settings = 1;
  /* Hack: it's common for implementations to assume 65536 bytes initial send
     window -- this should by rights be 0 */
  t->global.force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
  t->global.sent_local_settings = 0;

  /* configure http2 the way we like it */
  if (is_client) {
    push_setting(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
    push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
  }
  push_setting(t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, DEFAULT_WINDOW);

  if (channel_args) {
    for (i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
        if (is_client) {
          gpr_log(GPR_ERROR, "%s: is ignored on the client",
                  GRPC_ARG_MAX_CONCURRENT_STREAMS);
        } else if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
          gpr_log(GPR_ERROR, "%s: must be an integer",
                  GRPC_ARG_MAX_CONCURRENT_STREAMS);
        } else {
          push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
                       channel_args->args[i].value.integer);
        }
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
        if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
          gpr_log(GPR_ERROR, "%s: must be an integer",
                  GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER);
        } else if ((t->global.next_stream_id & 1) !=
                   (channel_args->args[i].value.integer & 1)) {
          gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
                  GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
                  t->global.next_stream_id & 1,
                  is_client ? "client" : "server");
        } else {
          t->global.next_stream_id = channel_args->args[i].value.integer;
        }
      }
    }
  }

  gpr_mu_lock(&t->mu);
  t->channel_callback.executing = 1;
  REF_TRANSPORT(t, "init"); /* matches unref at end of this function */
  gpr_mu_unlock(&t->mu);

  sr = setup(arg, &t->base, t->metadata_context);

  lock(t);
  t->channel_callback.cb = sr.callbacks;
  t->channel_callback.cb_user_data = sr.user_data;
  t->channel_callback.executing = 0;
  unlock(t);

  REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
  recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK);

  UNREF_TRANSPORT(t, "init");
}
Example #29
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  // Initialize data members.
  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
  calld->path = GRPC_MDSTR_REF(args->path);
  calld->call_start_time = args->start_time;
  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
  calld->cancel_error = GRPC_ERROR_NONE;
  gpr_atm_rel_store(&calld->subchannel_call, 0);
  gpr_mu_init(&calld->mu);
  calld->connected_subchannel = NULL;
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  calld->owning_call = args->call_stack;
  calld->pollent = NULL;
  // If the resolver has already returned results, then we can access
  // the service config parameters immediately.  Otherwise, we need to
  // defer that work until the resolver returns an initial result.
  // TODO(roth): This code is almost but not quite identical to the code
  // in read_service_config() above.  It would be nice to find a way to
  // combine them, to avoid having to maintain it twice.
  gpr_mu_lock(&chand->mu);
  if (chand->lb_policy != NULL) {
    // We already have a resolver result, so check for service config.
    if (chand->method_params_table != NULL) {
      grpc_mdstr_hash_table *method_params_table =
          grpc_mdstr_hash_table_ref(chand->method_params_table);
      gpr_mu_unlock(&chand->mu);
      method_parameters *method_params =
          grpc_method_config_table_get(method_params_table, args->path);
      if (method_params != NULL) {
        if (gpr_time_cmp(method_params->timeout,
                         gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
          gpr_timespec per_method_deadline =
              gpr_time_add(calld->call_start_time, method_params->timeout);
          calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
        }
        if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
          calld->wait_for_ready_from_service_config =
              method_params->wait_for_ready;
        }
      }
      grpc_mdstr_hash_table_unref(method_params_table);
    } else {
      gpr_mu_unlock(&chand->mu);
    }
  } else {
    // We don't yet have a resolver result, so register a callback to
    // get the service config data once the resolver returns.
    // Take a reference to the call stack to be owned by the callback.
    GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
    grpc_closure_list_append(&chand->waiting_for_config_closures,
                             &calld->read_service_config, GRPC_ERROR_NONE);
    gpr_mu_unlock(&chand->mu);
  }
  // Start the deadline timer with the current deadline value.  If we
  // do not yet have service config data, then the timer may be reset
  // later.
  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
  return GRPC_ERROR_NONE;
}
Example #30
// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         grpc_transport_stream_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(calld);
  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&calld->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(calld);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&calld->mu);
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&calld->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_error != GRPC_ERROR_NONE) {
    if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
                         (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
      goto retry;
    } else {
      // Stash a copy of cancel_error in our call data, so that we can use
      // it for subsequent operations.  This ensures that if the call is
      // cancelled before any ops are passed down (e.g., if the deadline
      // is in the past when the call starts), we can return the right
      // error to the caller when the first op does get passed down.
      calld->cancel_error = GRPC_ERROR_REF(op->cancel_error);
      switch (calld->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
                          NULL, GRPC_ERROR_REF(op->cancel_error));
          break;
      }
      gpr_mu_unlock(&calld->mu);
      grpc_transport_stream_op_finish_with_failure(
          exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
      GPR_TIMER_END("cc_start_transport_stream_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
    /* If a subchannel is not available immediately, the polling entity from
       call_data should be provided to channel_data's interested_parties, so
       that IO of the lb_policy and resolver could be done under it. */
    if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                        op->send_initial_metadata_flags,
                        &calld->connected_subchannel, &calld->next_step,
                        GRPC_ERROR_NONE)) {
      calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
    } else {
      grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
                                             chand->interested_parties);
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel != NULL) {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
        calld->deadline, &subchannel_call);
    if (error != GRPC_ERROR_NONE) {
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(calld, op);
  gpr_mu_unlock(&calld->mu);
  GPR_TIMER_END("cc_start_transport_stream_op", 0);
}