Example #1
static void unlock_check_channel_callbacks(grpc_chttp2_transport *t) {
  if (t->channel_callback.executing) {
    return;
  }
  if (t->global.goaway_state != GRPC_CHTTP2_ERROR_STATE_NONE) {
    if (t->global.goaway_state == GRPC_CHTTP2_ERROR_STATE_SEEN &&
        t->global.error_state != GRPC_CHTTP2_ERROR_STATE_NOTIFIED) {
      notify_goaways_args *a = gpr_malloc(sizeof(*a));
      a->t = t;
      a->error = t->global.goaway_error;
      a->text = t->global.goaway_text;
      t->global.goaway_state = GRPC_CHTTP2_ERROR_STATE_NOTIFIED;
      t->channel_callback.executing = 1;
      grpc_iomgr_closure_init(&a->closure, notify_goaways, a);
      REF_TRANSPORT(t, "notify_goaways");
      grpc_chttp2_schedule_closure(&t->global, &a->closure, 1);
      return;
    } else if (t->global.goaway_state != GRPC_CHTTP2_ERROR_STATE_NOTIFIED) {
      return;
    }
  }
  if (t->global.error_state == GRPC_CHTTP2_ERROR_STATE_SEEN) {
    t->global.error_state = GRPC_CHTTP2_ERROR_STATE_NOTIFIED;
    t->channel_callback.executing = 1;
    REF_TRANSPORT(t, "notify_closed");
    grpc_chttp2_schedule_closure(&t->global, &t->channel_callback.notify_closed,
                                 1);
  }
}
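Every snippet on this page follows the same basic pattern: bind a callback and its argument into a grpc_iomgr_closure, then hand the closure to the I/O manager. Below is a minimal sketch of that pattern distilled from the examples; my_ctx, my_cb, schedule_example, and payload are illustrative names, not identifiers from the gRPC tree.

/* Minimal sketch: heap-allocate a context with the closure embedded in it,
   initialize the closure, and schedule it, mirroring notify_goaways_args
   above. All names here are hypothetical. */
typedef struct {
  grpc_iomgr_closure closure; /* embedded, so no separate allocation */
  int payload;                /* per-callback state */
} my_ctx;

static void my_cb(void *arg, int success) {
  my_ctx *ctx = arg;
  if (success) {
    /* ... act on ctx->payload ... */
  }
  gpr_free(ctx); /* the callback owns the context once scheduled */
}

static void schedule_example(void) {
  my_ctx *ctx = gpr_malloc(sizeof(*ctx));
  ctx->payload = 42;
  grpc_iomgr_closure_init(&ctx->closure, my_cb, ctx);
  grpc_iomgr_add_callback(&ctx->closure); /* my_cb runs on the iomgr later */
}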
Example #2
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
                           const void *server_transport_data,
                           grpc_transport_stream_op *initial_op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  grpc_server_security_context *server_ctx = NULL;

  /* initialize members */
  memset(calld, 0, sizeof(*calld));
  grpc_iomgr_closure_init(&calld->auth_on_recv, auth_on_recv, elem);

  GPR_ASSERT(initial_op && initial_op->context != NULL &&
             initial_op->context[GRPC_CONTEXT_SECURITY].value == NULL);

  /* Create a security context for the call and reference the auth context from
     the channel. */
  if (initial_op->context[GRPC_CONTEXT_SECURITY].value != NULL) {
    initial_op->context[GRPC_CONTEXT_SECURITY].destroy(
        initial_op->context[GRPC_CONTEXT_SECURITY].value);
  }
  server_ctx = grpc_server_security_context_create();
  server_ctx->auth_context =
      grpc_auth_context_create(chand->security_connector->auth_context);
  server_ctx->auth_context->pollset = initial_op->bind_pollset;
  initial_op->context[GRPC_CONTEXT_SECURITY].value = server_ctx;
  initial_op->context[GRPC_CONTEXT_SECURITY].destroy =
      grpc_server_security_context_destroy;
  calld->auth_context = server_ctx->auth_context;

  /* Set the metadata callbacks. */
  set_recv_ops_md_callbacks(elem, initial_op);
}
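The destroy hook registered above pairs with grpc_server_security_context_create. Its real body lives in security_context.c; the following is only a hedged reconstruction, and the GRPC_AUTH_CONTEXT_UNREF call and its tag string are assumptions.

/* Hedged sketch of the destroy callback registered above: release the
   auth context reference taken at creation, then free the context.
   GRPC_AUTH_CONTEXT_UNREF and its tag are assumed, not taken from the
   examples on this page. */
static void sketch_server_security_context_destroy(void *ctx) {
  grpc_server_security_context *c = ctx;
  GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "server_security_context");
  gpr_free(c);
}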
Example #3
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
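The three-way switch above is the grpc_endpoint_write contract that every caller on this page follows: GRPC_ENDPOINT_DONE means the write completed synchronously and the closure will not run, GRPC_ENDPOINT_PENDING means the iomgr will invoke the closure later, and GRPC_ENDPOINT_ERROR means the write failed immediately. A small helper capturing that contract might look like the sketch below; it assumes the iomgr-era closure exposes cb and cb_arg fields, and write_with_closure is a hypothetical name.

/* Sketch: normalize the grpc_endpoint_write contract so the done closure
   runs exactly once whether the write completed inline, was queued, or
   failed. Assumes grpc_iomgr_closure has cb/cb_arg members. */
static void write_with_closure(grpc_endpoint *ep, gpr_slice_buffer *buf,
                               grpc_iomgr_closure *done) {
  switch (grpc_endpoint_write(ep, buf, done)) {
    case GRPC_ENDPOINT_DONE:
      done->cb(done->cb_arg, 1); /* completed inline: run the callback now */
      break;
    case GRPC_ENDPOINT_PENDING:
      break; /* the iomgr will invoke *done when the write finishes */
    case GRPC_ENDPOINT_ERROR:
      done->cb(done->cb_arg, 0); /* failed immediately: report failure */
      break;
  }
}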
Example #4
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
                           const void *server_transport_data,
                           grpc_transport_stream_op *initial_op) {
  call_data *calld = elem->call_data;
  calld->sent_initial_metadata = 0;
  calld->got_initial_metadata = 0;
  calld->on_done_recv = NULL;
  grpc_iomgr_closure_init(&calld->hc_on_recv, hc_on_recv, elem);
  if (initial_op) hc_mutate_op(elem, initial_op);
}
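Examples #2, #4, #6, and #7 all rely on the same interception idiom: the filter initializes a closure pointing back at its call element (hc_on_recv here), then swaps it in for the transport's on_done_recv so the filter sees received ops first. A sketch of the two halves of that swap follows; the recv_ops/on_done_recv fields and the cb/cb_arg closure members are assumptions based on the surrounding examples, and the sketch_* names are hypothetical.

/* Sketch of the closure-swap idiom behind hc_mutate_op and hc_on_recv.
   Struct layouts are assumed from the examples on this page. */
static void sketch_on_recv(void *ptr, int success) {
  grpc_call_element *elem = ptr;
  call_data *calld = elem->call_data;
  /* ... inspect or rewrite calld->recv_ops here ... */
  calld->on_done_recv->cb(calld->on_done_recv->cb_arg, success);
}

static void sketch_mutate_op(grpc_call_element *elem,
                             grpc_transport_stream_op *op) {
  call_data *calld = elem->call_data;
  if (op->recv_ops) {
    calld->recv_ops = op->recv_ops;         /* remember what to inspect */
    calld->on_done_recv = op->on_done_recv; /* stash the original closure */
    op->on_done_recv = &calld->hc_on_recv;  /* substitute the filter's own */
  }
}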
Example #5
File: fd_posix.c Project: Infixz/grpc
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
  grpc_iomgr_closure_init(&fd->on_done_closure, on_done ? on_done : do_nothing,
                          user_data);
  shutdown(fd->fd, SHUT_RDWR);
  ref_by(fd, 1); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  unref_by(fd, 2); /* drop the reference */
}
Example #6
static void server_init_call_elem(grpc_call_element* elem,
                                  const void* server_transport_data,
                                  grpc_transport_stream_op* initial_op) {
  call_data* d = elem->call_data;
  GPR_ASSERT(d != NULL);
  d->start_ts = gpr_now(GPR_CLOCK_REALTIME);
  /* TODO(hongyu): call census_tracing_start_op here. */
  grpc_iomgr_closure_init(d->on_done_recv, server_on_done_recv, elem);
  if (initial_op) server_mutate_op(elem, initial_op);
}
Example #7
/* Constructor for call_data */
static void init_call_elem(grpc_call_element *elem,
                           const void *server_transport_data,
                           grpc_transport_stream_op *initial_op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  /* initialize members */
  memset(calld, 0, sizeof(*calld));
  grpc_iomgr_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
  if (initial_op) hs_mutate_op(elem, initial_op);
}
Example #8
/* Schedule a shutdown of the socket operations. Will call the pending
   operations to abort them. We need to do that this way because of the
   various callsites of that function, which happens to be in various
   mutex hold states, and that'd be unsafe to call them directly. */
int grpc_winsocket_shutdown(grpc_winsocket *winsocket) {
  int callbacks_set = 0;
  SOCKET socket;
  gpr_mu_lock(&winsocket->state_mu);
  socket = winsocket->socket;
  if (winsocket->read_info.cb) {
    callbacks_set++;
    grpc_iomgr_closure_init(&winsocket->shutdown_closure,
                            winsocket->read_info.cb,
                            winsocket->read_info.opaque);
    grpc_iomgr_add_delayed_callback(&winsocket->shutdown_closure, 0);
  }
  if (winsocket->write_info.cb) {
    callbacks_set++;
    grpc_iomgr_closure_init(&winsocket->shutdown_closure,
                            winsocket->write_info.cb,
                            winsocket->write_info.opaque);
    grpc_iomgr_add_delayed_callback(&winsocket->shutdown_closure, 0);
  }
  gpr_mu_unlock(&winsocket->state_mu);
  closesocket(socket);
  return callbacks_set;
}
Example #9
File: credentials.c Project: Infixz/grpc
static void fake_oauth2_get_request_metadata(grpc_credentials *creds,
                                             const char *service_url,
                                             grpc_credentials_metadata_cb cb,
                                             void *user_data) {
  grpc_fake_oauth2_credentials *c = (grpc_fake_oauth2_credentials *)creds;

  if (c->is_async) {
    grpc_credentials_metadata_request *cb_arg =
        grpc_credentials_metadata_request_create(creds, cb, user_data);
    grpc_iomgr_closure_init(cb_arg->on_simulated_token_fetch_done_closure,
                            on_simulated_token_fetch_done, cb_arg);
    grpc_iomgr_add_callback(cb_arg->on_simulated_token_fetch_done_closure);
  } else {
    cb(user_data, c->access_token_md->entries, 1, GRPC_CREDENTIALS_OK);
  }
}
Example #10
static void multipoll_with_epoll_pollset_add_fd(grpc_pollset *pollset,
                                                grpc_fd *fd,
                                                int and_unlock_pollset) {
  if (and_unlock_pollset) {
    gpr_mu_unlock(&pollset->mu);
    finally_add_fd(pollset, fd);
  } else {
    delayed_add *da = gpr_malloc(sizeof(*da));
    da->pollset = pollset;
    da->fd = fd;
    GRPC_FD_REF(fd, "delayed_add");
    grpc_iomgr_closure_init(&da->closure, perform_delayed_add, da);
    pollset->in_flight_cbs++;
    grpc_iomgr_add_callback(&da->closure);
  }
}
Example #11
File: pick_first.c Project: JoeWoo/grpc
static grpc_lb_policy *create_pick_first(grpc_lb_policy_factory *factory,
                                         grpc_lb_policy_args *args) {
  pick_first_lb_policy *p = gpr_malloc(sizeof(*p));
  GPR_ASSERT(args->num_subchannels > 0);
  memset(p, 0, sizeof(*p));
  grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
  p->subchannels = gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
  p->num_subchannels = args->num_subchannels;
  grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
                               "pick_first");
  memcpy(p->subchannels, args->subchannels,
         sizeof(grpc_subchannel *) * args->num_subchannels);
  grpc_iomgr_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
  gpr_mu_init(&p->mu);
  return &p->base;
}
Example #12
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
                       "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_iomgr_closure_init(&state.read_cb, read_cb, &state);

  switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) {
    case GRPC_ENDPOINT_DONE:
      read_cb(&state, 1);
      break;
    case GRPC_ENDPOINT_ERROR:
      read_cb(&state, 0);
      break;
    case GRPC_ENDPOINT_PENDING:
      break;
  }

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      deadline);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(ep);
}
Example #13
static void init_transport(grpc_chttp2_transport *t,
                           grpc_transport_setup_callback setup, void *arg,
                           const grpc_channel_args *channel_args,
                           grpc_endpoint *ep, gpr_slice *slices, size_t nslices,
                           grpc_mdctx *mdctx, int is_client) {
  size_t i;
  int j;
  grpc_transport_setup_result sr;

  GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
             GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);

  memset(t, 0, sizeof(*t));

  t->base.vtable = &vtable;
  t->ep = ep;
  /* one ref is for destroy, the other for when ep becomes NULL */
  gpr_ref_init(&t->refs, 2);
  gpr_mu_init(&t->mu);
  grpc_mdctx_ref(mdctx);
  t->metadata_context = mdctx;
  t->endpoint_reading = 1;
  t->global.error_state = GRPC_CHTTP2_ERROR_STATE_NONE;
  t->global.next_stream_id = is_client ? 1 : 2;
  t->global.is_client = is_client;
  t->global.outgoing_window = DEFAULT_WINDOW;
  t->global.incoming_window = DEFAULT_WINDOW;
  t->global.connection_window_target = DEFAULT_CONNECTION_WINDOW_TARGET;
  t->global.ping_counter = 1;
  t->global.pings.next = t->global.pings.prev = &t->global.pings;
  t->parsing.is_client = is_client;
  t->parsing.str_grpc_timeout =
      grpc_mdstr_from_string(t->metadata_context, "grpc-timeout");
  t->parsing.deframe_state =
      is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
  t->writing.is_client = is_client;

  gpr_slice_buffer_init(&t->global.qbuf);

  gpr_slice_buffer_init(&t->writing.outbuf);
  grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx);
  grpc_iomgr_closure_init(&t->writing_action, writing_action, t);
  grpc_iomgr_closure_init(&t->reading_action, reading_action, t);

  gpr_slice_buffer_init(&t->parsing.qbuf);
  grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser);
  grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context);

  grpc_iomgr_closure_init(&t->channel_callback.notify_closed, notify_closed, t);
  if (is_client) {
    gpr_slice_buffer_add(
        &t->global.qbuf,
        gpr_slice_from_copied_string(GRPC_CHTTP2_CLIENT_CONNECT_STRING));
  }
  /* 8 is a random stab in the dark as to a good initial size: it's small enough
     that it shouldn't waste memory for infrequently used connections, yet
     large enough that the exponential growth should happen nicely when it's
     needed.
     TODO(ctiller): tune this */
  grpc_chttp2_stream_map_init(&t->parsing_stream_map, 8);
  grpc_chttp2_stream_map_init(&t->new_stream_map, 8);

  /* copy in initial settings to all setting sets */
  for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
    t->parsing.settings[i] = grpc_chttp2_settings_parameters[i].default_value;
    for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
      t->global.settings[j][i] =
          grpc_chttp2_settings_parameters[i].default_value;
    }
  }
  t->global.dirtied_local_settings = 1;
  /* Hack: it's common for implementations to assume 65536 bytes initial send
     window -- this should by rights be 0 */
  t->global.force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
  t->global.sent_local_settings = 0;

  /* configure http2 the way we like it */
  if (is_client) {
    push_setting(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
    push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
  }
  push_setting(t, GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE, DEFAULT_WINDOW);

  if (channel_args) {
    for (i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_MAX_CONCURRENT_STREAMS)) {
        if (is_client) {
          gpr_log(GPR_ERROR, "%s: is ignored on the client",
                  GRPC_ARG_MAX_CONCURRENT_STREAMS);
        } else if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
          gpr_log(GPR_ERROR, "%s: must be an integer",
                  GRPC_ARG_MAX_CONCURRENT_STREAMS);
        } else {
          push_setting(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
                       channel_args->args[i].value.integer);
        }
      } else if (0 == strcmp(channel_args->args[i].key,
                             GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
        if (channel_args->args[i].type != GRPC_ARG_INTEGER) {
          gpr_log(GPR_ERROR, "%s: must be an integer",
                  GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER);
        } else if ((t->global.next_stream_id & 1) !=
                   (channel_args->args[i].value.integer & 1)) {
          gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
                  GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
                  t->global.next_stream_id & 1,
                  is_client ? "client" : "server");
        } else {
          t->global.next_stream_id = channel_args->args[i].value.integer;
        }
      }
    }
  }

  gpr_mu_lock(&t->mu);
  t->channel_callback.executing = 1;
  REF_TRANSPORT(t, "init"); /* matches unref at end of this function */
  gpr_mu_unlock(&t->mu);

  sr = setup(arg, &t->base, t->metadata_context);

  lock(t);
  t->channel_callback.cb = sr.callbacks;
  t->channel_callback.cb_user_data = sr.user_data;
  t->channel_callback.executing = 0;
  unlock(t);

  REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
  recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK);

  UNREF_TRANSPORT(t, "init");
}
Example #14
/* Do both reading and writing using the grpc_endpoint API.

   This also includes a test of the shutdown behavior.
 */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);
  gpr_log(GPR_DEBUG, "num_bytes=%d write_size=%d slice_size=%d shutdown=%d",
          (int)num_bytes, (int)write_size, (int)slice_size, shutdown);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d",
            num_bytes, slice_size);
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;
  grpc_iomgr_closure_init(&state.done_read, read_and_write_test_read_handler,
                          &state);
  grpc_iomgr_closure_init(&state.done_write, read_and_write_test_write_handler,
                          &state);
  gpr_slice_buffer_init(&state.outgoing);
  gpr_slice_buffer_init(&state.incoming);

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, 1);

  switch (
      grpc_endpoint_read(state.read_ep, &state.incoming, &state.done_read)) {
    case GRPC_ENDPOINT_PENDING:
      break;
    case GRPC_ENDPOINT_ERROR:
      read_and_write_test_read_handler(&state, 0);
      break;
    case GRPC_ENDPOINT_DONE:
      read_and_write_test_read_handler(&state, 1);
      break;
  }

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(state.read_ep);
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(state.write_ep);
  }

  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  while (!state.read_done || !state.write_done) {
    grpc_pollset_worker worker;
    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
    grpc_pollset_work(g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));

  end_test(config);
  gpr_slice_buffer_destroy(&state.outgoing);
  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(state.read_ep);
  grpc_endpoint_destroy(state.write_ep);
}
Example #15
File: server.c Project: kdavison/grpc
static void server_on_recv(void *ptr, int success) {
  grpc_call_element *elem = ptr;
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  int remove_res;

  if (success && !calld->got_initial_metadata) {
    size_t i;
    size_t nops = calld->recv_ops->nops;
    grpc_stream_op *ops = calld->recv_ops->ops;
    for (i = 0; i < nops; i++) {
      grpc_stream_op *op = &ops[i];
      if (op->type != GRPC_OP_METADATA) continue;
      grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
      if (0 != gpr_time_cmp(op->data.metadata.deadline, gpr_inf_future)) {
        calld->deadline = op->data.metadata.deadline;
      }
      calld->got_initial_metadata = 1;
      start_new_rpc(elem);
      break;
    }
  }

  switch (*calld->recv_state) {
    case GRPC_STREAM_OPEN:
      break;
    case GRPC_STREAM_SEND_CLOSED:
      break;
    case GRPC_STREAM_RECV_CLOSED:
      gpr_mu_lock(&chand->server->mu_call);
      if (calld->state == NOT_STARTED) {
        calld->state = ZOMBIED;
        grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
        grpc_iomgr_add_callback(&calld->kill_zombie_closure);
      }
      gpr_mu_unlock(&chand->server->mu_call);
      break;
    case GRPC_STREAM_CLOSED:
      gpr_mu_lock(&chand->server->mu_call);
      if (calld->state == NOT_STARTED) {
        calld->state = ZOMBIED;
        grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
        grpc_iomgr_add_callback(&calld->kill_zombie_closure);
      } else if (calld->state == PENDING) {
        call_list_remove(calld, PENDING_START);
        calld->state = ZOMBIED;
        grpc_iomgr_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
        grpc_iomgr_add_callback(&calld->kill_zombie_closure);
      }
      remove_res = call_list_remove(calld, ALL_CALLS);
      gpr_mu_unlock(&chand->server->mu_call);
      gpr_mu_lock(&chand->server->mu_global);
      if (remove_res) {
        decrement_call_count(chand);
      }
      gpr_mu_unlock(&chand->server->mu_global);
      break;
  }

  calld->on_done_recv(calld->recv_user_data, success);
}
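The ZOMBIED branches above all schedule the same closure. For reference, a plausible kill_zombie body is sketched below; the real implementation is in server.c, and the teardown via grpc_call_from_top_element and grpc_call_destroy is an assumption here.

/* Hedged sketch of the kill_zombie callback scheduled above: once the
   iomgr runs it, the zombied call is torn down. grpc_call_from_top_element
   and grpc_call_destroy are assumed APIs. */
static void kill_zombie(void *elem, int success) {
  grpc_call_destroy(grpc_call_from_top_element(elem));
}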
Example #16
File: bad_client.c Project: JoeWoo/grpc
void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator,
                              const char *client_payload,
                              size_t client_payload_length, gpr_uint32 flags) {
  grpc_endpoint_pair sfd;
  thd_args a;
  gpr_thd_id id;
  char *hex;
  grpc_transport *transport;
  grpc_mdctx *mdctx = grpc_mdctx_create();
  gpr_slice slice =
      gpr_slice_from_copied_buffer(client_payload, client_payload_length);
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure done_write_closure;

  hex = gpr_dump(client_payload, client_payload_length,
                 GPR_DUMP_HEX | GPR_DUMP_ASCII);

  /* Add a debug log */
  gpr_log(GPR_INFO, "TEST: %s", hex);

  gpr_free(hex);

  /* Init grpc */
  grpc_init();

  /* Create endpoints */
  sfd = grpc_iomgr_create_endpoint_pair("fixture", 65536);

  /* Create server, completion events */
  a.server = grpc_server_create_from_filters(NULL, 0, NULL);
  a.cq = grpc_completion_queue_create(NULL);
  gpr_event_init(&a.done_thd);
  gpr_event_init(&a.done_write);
  a.validator = validator;
  grpc_server_register_completion_queue(a.server, a.cq, NULL);
  grpc_server_start(a.server);
  transport = grpc_create_chttp2_transport(NULL, sfd.server, mdctx, 0);
  server_setup_transport(&a, transport, mdctx);
  grpc_chttp2_transport_start_reading(transport, NULL, 0);

  /* Bind everything into the same pollset */
  grpc_endpoint_add_to_pollset(sfd.client, grpc_cq_pollset(a.cq));
  grpc_endpoint_add_to_pollset(sfd.server, grpc_cq_pollset(a.cq));

  /* Check a ground truth */
  GPR_ASSERT(grpc_server_has_open_connections(a.server));

  /* Start validator */
  gpr_thd_new(&id, thd_func, &a, NULL);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_add(&outgoing, slice);
  grpc_iomgr_closure_init(&done_write_closure, done_write, &a);

  /* Write data */
  switch (grpc_endpoint_write(sfd.client, &outgoing, &done_write_closure)) {
    case GRPC_ENDPOINT_DONE:
      done_write(&a, 1);
      break;
    case GRPC_ENDPOINT_PENDING:
      break;
    case GRPC_ENDPOINT_ERROR:
      done_write(&a, 0);
      break;
  }

  /* Await completion */
  GPR_ASSERT(
      gpr_event_wait(&a.done_write, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)));

  if (flags & GRPC_BAD_CLIENT_DISCONNECT) {
    grpc_endpoint_shutdown(sfd.client);
    grpc_endpoint_destroy(sfd.client);
    sfd.client = NULL;
  }

  GPR_ASSERT(gpr_event_wait(&a.done_thd, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)));

  /* Shutdown */
  if (sfd.client) {
    grpc_endpoint_shutdown(sfd.client);
    grpc_endpoint_destroy(sfd.client);
  }
  grpc_server_shutdown_and_notify(a.server, a.cq, NULL);
  GPR_ASSERT(grpc_completion_queue_pluck(
                 a.cq, NULL, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1), NULL)
                 .type == GRPC_OP_COMPLETE);
  grpc_server_destroy(a.server);
  grpc_completion_queue_destroy(a.cq);
  gpr_slice_buffer_destroy(&outgoing);

  grpc_shutdown();
}