Example #1
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size) {
  int sv[2];
  grpc_endpoint_pair p;
  create_sockets(sv);
  p.client = grpc_tcp_create(grpc_fd_create(sv[1]), read_slice_size);
  p.server = grpc_tcp_create(grpc_fd_create(sv[0]), read_slice_size);
  return p;
}
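All of the socketpair-based examples on this page call a create_sockets helper that the excerpts omit. In the gRPC test sources it boils down to the following sketch (assuming POSIX socketpair and fcntl; GPR_ASSERT comes from gRPC's support headers):

#include <fcntl.h>
#include <sys/socket.h>

static void create_sockets(int sv[2]) {
  int flags;
  /* A connected pair of local stream sockets stands in for a real TCP
     connection. */
  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
  /* Both ends must be non-blocking for the grpc_tcp endpoint to poll them. */
  flags = fcntl(sv[0], F_GETFL, 0);
  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
  flags = fcntl(sv[1], F_GETFL, 0);
  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
}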
Example #2
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;

  create_sockets(sv);
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                slice_size, "test");
  grpc_endpoint_add_to_pollset(f.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(f.server_ep, &g_pollset);

  return f;
}
Example #3
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, int from_iocp) {
  async_connect *ac = acp;
  SOCKET sock = ac->socket->socket;
  grpc_endpoint **ep = ac->endpoint;
  grpc_winsocket_callback_info *info = &ac->socket->write_info;
  grpc_closure *on_done = ac->on_done;

  grpc_timer_cancel(exec_ctx, &ac->alarm);

  gpr_mu_lock(&ac->mu);

  if (from_iocp) {
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
                                              &transfered_bytes, FALSE, &flags);
    GPR_ASSERT(transfered_bytes == 0);
    if (!wsa_success) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
      gpr_free(utf8_message);
    } else {
      *ep = grpc_tcp_create(ac->socket, ac->addr_name);
      ac->socket = NULL;
    }
  }

  async_connect_unlock_and_cleanup(ac);
  /* If the connection was aborted, the callback was already called when
     the deadline was met. */
  on_done->cb(exec_ctx, on_done->cb_arg, *ep != NULL);
}
Example #4
grpc_endpoint *grpc_tcp_client_create_from_fd(
    grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
    const char *addr_str) {
  size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
                                        8 * 1024 * 1024};
        tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
            &channel_args->args[i], options);
      } else if (0 ==
                 strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_internal_ref(
            channel_args->args[i].value.pointer.p);
      }
    }
  }

  grpc_endpoint *ep =
      grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
  return ep;
}
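Both keys consulted in the loop above are public channel arguments, so a caller can influence this path without touching iomgr internals. A minimal sketch of setting the read-chunk size (the 256 KiB value is illustrative and the helper name is hypothetical; it uses only the public grpc_arg API):

#include <grpc/grpc.h>

static grpc_channel_args small_read_chunk_args(grpc_arg *storage) {
  /* GRPC_ARG_TCP_READ_CHUNK_SIZE is an integer arg; the code above clamps it
     to [1, 8 * 1024 * 1024]. */
  storage->type = GRPC_ARG_INTEGER;
  storage->key = GRPC_ARG_TCP_READ_CHUNK_SIZE;
  storage->value.integer = 256 * 1024;
  grpc_channel_args args = {1, storage};
  return args; /* pass as the channel_args parameter above */
}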
Example #5
/* event manager callback when reads are ready */
static void on_accept(void *arg, int success) {
  server_port *sp = arg;
  SOCKET sock = sp->new_socket;
  grpc_winsocket_callback_info *info = &sp->socket->read_info;
  grpc_endpoint *ep = NULL;

  if (success) {
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
                                              &transfered_bytes, FALSE,
                                              &flags);
    if (!wsa_success) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
      gpr_free(utf8_message);
      closesocket(sock);
    } else {
      gpr_log(GPR_DEBUG, "on_accept: accepted connection");
      ep = grpc_tcp_create(grpc_winsocket_create(sock));
    }
  } else {
    gpr_log(GPR_DEBUG, "on_accept: shutting down");
    closesocket(sock);
    gpr_mu_lock(&sp->server->mu);
    if (0 == --sp->server->active_ports) {
      gpr_cv_broadcast(&sp->server->cv);
    }
    gpr_mu_unlock(&sp->server->mu);
  }

  if (ep) sp->server->cb(sp->server->cb_arg, ep);
  start_accept(sp);
}
Example #6
static void uv_tc_on_connect(uv_connect_t *req, int status) {
  grpc_uv_tcp_connect *connect = req->data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_error *error = GRPC_ERROR_NONE;
  int done;
  grpc_closure *closure = connect->closure;
  grpc_timer_cancel(&exec_ctx, &connect->alarm);
  if (status == 0) {
    *connect->endpoint = grpc_tcp_create(
        connect->tcp_handle, connect->resource_quota, connect->addr_name);
  } else {
    error = GRPC_ERROR_CREATE("Failed to connect to remote host");
    error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
    error =
        grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
    if (status == UV_ECANCELED) {
      error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
                                 "Timeout occurred");
      // This should only happen if the handle is already closed
    } else {
      error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
                                 uv_strerror(status));
      uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
    }
  }
  done = (--connect->refs == 0);
  if (done) {
    uv_tcp_connect_cleanup(&exec_ctx, connect);
  }
  grpc_closure_sched(&exec_ctx, closure, error);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #7
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
                                                   size_t read_slice_size) {
  int sv[2];
  grpc_endpoint_pair p;
  char *final_name;
  create_sockets(sv);

  gpr_asprintf(&final_name, "%s:client", name);
  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
                             "socketpair-server");
  gpr_free(final_name);
  gpr_asprintf(&final_name, "%s:server", name);
  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
                             "socketpair-client");
  gpr_free(final_name);
  return p;
}
Example #8
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
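The write_done closure used above is not shown in the excerpt; in the test it just records completion under the pollset mutex and kicks the pollset so the wait loop re-checks state.write_done. A sketch under those assumptions (the struct layout is abridged):

struct write_socket_state {
  grpc_endpoint *ep;
  int write_done;
};

static void write_done(grpc_exec_ctx *exec_ctx, void *user_data,
                       grpc_error *error) {
  struct write_socket_state *state = (struct write_socket_state *)user_data;
  gpr_mu_lock(g_mu);
  state->write_done = 1;
  /* Wake any worker parked in grpc_pollset_work so it notices the flag. */
  GPR_ASSERT(
      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL)));
  gpr_mu_unlock(g_mu);
}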
Example #9
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
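Example #9 predates the exec_ctx rework: grpc_endpoint_write and grpc_endpoint_read reported a tri-state status synchronously, and the callback fired only in the PENDING case. Roughly (a reconstruction of the old enum, not a current header):

typedef enum grpc_endpoint_op_status {
  GRPC_ENDPOINT_DONE,    /* completed immediately; the callback will not run */
  GRPC_ENDPOINT_PENDING, /* in flight; the callback runs when I/O finishes */
  GRPC_ENDPOINT_ERROR    /* failed immediately; the callback will not run */
} grpc_endpoint_op_status;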
Example #10
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                slice_size, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
Example #11
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("tcp_posix_test_socketpair");
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                resource_quota, slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                resource_quota, slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
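Fixtures like this one are not called directly; they plug into the shared endpoint test driver. Wiring it up looks roughly like this (a sketch assuming the grpc_endpoint_test_config convention from test/core/iomgr/endpoint_tests.h; clean_up is a trivial teardown hook):

static void clean_up(void) {}

static grpc_endpoint_test_config configs[] = {
    {"tcp/tcp_socketpair", create_fixture_tcp_socketpair, clean_up},
};

/* In main(), after grpc_init() and pollset setup:
   grpc_endpoint_tests(configs[0], g_pollset, g_mu); */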
Example #12
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  gpr_slice *slices;
  gpr_uint8 current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    if (state.write_done) {
      break;
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #13
/* event manager callback when reads are ready */
static void on_read(void *arg, int success) {
  server_port *sp = arg;

  if (!success) {
    goto error;
  }

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
             strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    sp->server->cb(sp->server->cb_arg,
                   grpc_tcp_create(grpc_fd_create(fd, name),
                                   GRPC_TCP_DEFAULT_READ_SLICE_SIZE));

    gpr_free(addr_str);
    gpr_free(name);
  }

  abort();

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_cv_broadcast(&sp->server->cv);
  }
  gpr_mu_unlock(&sp->server->mu);
}
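grpc_accept4, used in the accept loop above, is gRPC's portability wrapper over accept4: it atomically applies the non-blocking and close-on-exec flags where the kernel supports that, and falls back to accept plus fcntl elsewhere. A Linux-only sketch of the fast path (the fallback is omitted):

#define _GNU_SOURCE
#include <sys/socket.h>

int grpc_accept4(int sockfd, struct sockaddr *addr, socklen_t *addrlen,
                 int nonblock, int cloexec) {
  int flags = 0;
  if (nonblock) flags |= SOCK_NONBLOCK;
  if (cloexec) flags |= SOCK_CLOEXEC;
  /* One syscall: no window where the fd exists without the flags set. */
  return accept4(sockfd, addr, addrlen, flags);
}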
Example #14
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("large_read_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
                       slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
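fill_socket, which produces the payload above, writes a repeating byte pattern into the non-blocking socket until the kernel send buffer fills. A sketch assuming POSIX write semantics:

#include <errno.h>
#include <unistd.h>

static ssize_t fill_socket(int fd) {
  ssize_t write_bytes;
  ssize_t total_bytes = 0;
  unsigned i;
  unsigned char buf[256];
  for (i = 0; i < 256; ++i) buf[i] = (unsigned char)i;
  do {
    write_bytes = write(fd, buf, 256);
    if (write_bytes > 0) {
      total_bytes += write_bytes;
    }
  } while (write_bytes >= 0 || errno == EINTR);
  /* The fd is non-blocking, so write eventually fails with EAGAIN. */
  GPR_ASSERT(errno == EAGAIN);
  return total_bytes;
}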
Example #15
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
                       "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_iomgr_closure_init(&state.read_cb, read_cb, &state);

  switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) {
    case GRPC_ENDPOINT_DONE:
      read_cb(&state, 1);
      break;
    case GRPC_ENDPOINT_ERROR:
      read_cb(&state, 0);
      break;
    case GRPC_ENDPOINT_PENDING:
      break;
  }

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      deadline);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(ep);
}
Example #16
static void on_connect(uv_stream_t *server, int status) {
  grpc_tcp_listener *sp = (grpc_tcp_listener *)server->data;
  uv_tcp_t *client;
  grpc_endpoint *ep = NULL;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resolved_address peer_name;
  char *peer_name_string;
  int err;

  if (status < 0) {
    gpr_log(GPR_INFO, "Skipping on_accept due to error: %s",
            uv_strerror(status));
    return;
  }

  client = gpr_malloc(sizeof(uv_tcp_t));
  uv_tcp_init(uv_default_loop(), client);
  // UV documentation says this is guaranteed to succeed
  uv_accept((uv_stream_t *)server, (uv_stream_t *)client);
  // If the server has not been started, we discard incoming connections
  if (sp->server->on_accept_cb == NULL) {
    uv_close((uv_handle_t *)client, accepted_connection_close_cb);
  } else {
    peer_name_string = NULL;
    memset(&peer_name, 0, sizeof(grpc_resolved_address));
    peer_name.len = sizeof(struct sockaddr_storage);
    err = uv_tcp_getpeername(client, (struct sockaddr *)&peer_name.addr,
                             (int *)&peer_name.len);
    if (err == 0) {
      peer_name_string = grpc_sockaddr_to_uri(&peer_name);
    } else {
      gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
    }
    ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
    // Create acceptor.
    grpc_tcp_server_acceptor *acceptor = gpr_malloc(sizeof(*acceptor));
    acceptor->from_server = sp->server;
    acceptor->port_index = sp->port_index;
    acceptor->fd_index = 0;
    sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
                             acceptor);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #17
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
                       "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #18
static void on_connect(void *acp, int success) {
  async_connect *ac = acp;
  SOCKET sock = ac->socket->socket;
  grpc_endpoint *ep = NULL;
  grpc_winsocket_callback_info *info = &ac->socket->write_info;
  void(*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
  void *cb_arg = ac->cb_arg;

  grpc_alarm_cancel(&ac->alarm);

  if (success) {
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
                                              &transfered_bytes, FALSE,
                                              &flags);
    GPR_ASSERT(transfered_bytes == 0);
    if (!wsa_success) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "on_connect error: %s", utf8_message);
      gpr_free(utf8_message);
      goto finish;
    } else {
      ep = grpc_tcp_create(ac->socket);
      goto finish;
    }
  } else {
    gpr_log(GPR_ERROR, "on_connect is shutting down");
    goto finish;
  }

  abort();

finish:
  gpr_mu_lock(&ac->mu);
  if (!ep) {
    grpc_winsocket_orphan(ac->socket);
  }
  async_connect_cleanup(ac);
  cb(cb_arg, ep);
}
Example #19
static void on_connect(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
  async_connect *ac = acp;
  grpc_endpoint **ep = ac->endpoint;
  GPR_ASSERT(*ep == NULL);
  grpc_closure *on_done = ac->on_done;

  GRPC_ERROR_REF(error);

  gpr_mu_lock(&ac->mu);
  grpc_winsocket *socket = ac->socket;
  ac->socket = NULL;
  gpr_mu_unlock(&ac->mu);

  grpc_timer_cancel(exec_ctx, &ac->alarm);

  gpr_mu_lock(&ac->mu);

  if (error == GRPC_ERROR_NONE && socket != NULL) {
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success =
        WSAGetOverlappedResult(socket->socket, &socket->write_info.overlapped,
                               &transfered_bytes, FALSE, &flags);
    GPR_ASSERT(transfered_bytes == 0);
    if (!wsa_success) {
      error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
    } else {
      *ep = grpc_tcp_create(socket, ac->resource_quota, ac->addr_name);
      socket = NULL;
    }
  }

  async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
  /* If the connection was aborted, the callback was already called when
     the deadline was met. */
  grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
}
Example #20
static void on_writable(void *acp, int success) {
  async_connect *ac = acp;
  int so_error = 0;
  socklen_t so_error_size;
  int err;
  int fd = ac->fd->fd;
  int done;
  grpc_endpoint *ep = NULL;
  void (*cb)(void *arg, grpc_endpoint *tcp) = ac->cb;
  void *cb_arg = ac->cb_arg;

  grpc_alarm_cancel(&ac->alarm);

  if (success) {
    do {
      so_error_size = sizeof(so_error);
      err = getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &so_error_size);
    } while (err < 0 && errno == EINTR);
    if (err < 0) {
      gpr_log(GPR_ERROR, "getsockopt(ERROR): %s", strerror(errno));
      goto finish;
    } else if (so_error != 0) {
      if (so_error == ENOBUFS) {
        /* We will get one of these errors if we have run out of
           memory in the kernel for the data structures allocated
           when you connect a socket.  If this happens it is very
           likely that if we wait a little bit then try again the
           connection will work (since other programs or this
           program will close their network connections and free up
           memory).  This does _not_ indicate that there is anything
           wrong with the server we are connecting to, this is a
           local problem.

           If you are looking at this code, then chances are that
           your program or another program on the same computer
           opened too many network connections.  The "easy" fix:
           don't do that! */
        gpr_log(GPR_ERROR, "kernel out of buffers");
        grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
        return;
      } else {
        switch (so_error) {
          case ECONNREFUSED:
            gpr_log(GPR_ERROR, "socket error: connection refused");
            break;
          default:
            gpr_log(GPR_ERROR, "socket error: %d", so_error);
            break;
        }
        goto finish;
      }
    } else {
      ep = grpc_tcp_create(ac->fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
      goto finish;
    }
  } else {
    gpr_log(GPR_ERROR, "on_writable failed during connect");
    goto finish;
  }

  abort();

finish:
  gpr_mu_lock(&ac->mu);
  if (!ep) {
    grpc_fd_orphan(ac->fd, NULL, NULL);
  }
  done = (--ac->refs == 0);
  gpr_mu_unlock(&ac->mu);
  if (done) {
    gpr_mu_destroy(&ac->mu);
    gpr_free(ac);
  }
  cb(cb_arg, ep);
}
Example #21
/* Event manager callback when reads are ready. */
static void on_accept(void *arg, int from_iocp) {
  server_port *sp = arg;
  SOCKET sock = sp->new_socket;
  grpc_winsocket_callback_info *info = &sp->socket->read_info;
  grpc_endpoint *ep = NULL;

  /* The shutdown sequence is done in two parts. This is the second
     part here, acknowledging the IOCP notification, and doing nothing
     else, especially not queuing a new accept. */
  if (sp->shutting_down) {
    GPR_ASSERT(from_iocp);
    sp->shutting_down = 0;
    gpr_mu_lock(&sp->server->mu);
    if (0 == --sp->server->active_ports) {
      gpr_cv_broadcast(&sp->server->cv);
    }
    gpr_mu_unlock(&sp->server->mu);
    return;
  }

  if (from_iocp) {
    /* The IOCP notified us of a completed operation. Let's grab the results,
       and act accordingly. */
    DWORD transfered_bytes = 0;
    DWORD flags;
    BOOL wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
                                              &transfered_bytes, FALSE, &flags);
    if (!wsa_success) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
      gpr_free(utf8_message);
      closesocket(sock);
    } else {
      ep = grpc_tcp_create(grpc_winsocket_create(sock));
    }
  } else {
    /* If we're not notified from the IOCP, it means we are asked to shutdown.
       This will initiate that shutdown. Calling closesocket will trigger an
       IOCP notification, that will call this function a second time, from
       the IOCP thread. Of course, this only works if the socket was, in fact,
       listening. If that's not the case, we'd wait indefinitely. That's a bit
       of a degenerate case, but it can happen if you create a server, but
       don't start it. So let's support that by recursing once. */
    sp->shutting_down = 1;
    sp->new_socket = INVALID_SOCKET;
    if (sock != INVALID_SOCKET) {
      closesocket(sock);
    } else {
      on_accept(sp, 1);
    }
    return;
  }

  /* The only time we should call our callback, is where we successfully
     managed to accept a connection, and created an endpoint. */
  if (ep) sp->server->cb(sp->server->cb_arg, ep);
  /* As we were notified from the IOCP of one and exactly one accept,
     the former socket we created has now either been destroyed or assigned
     to the new connection. We need to create a new one for the next
     connection. */
  start_accept(sp);
}
Example #22
/* Do a read_test, then release fd and try to read/write again. Verify that
   grpc_tcp_fd() is available before the fd is released. */
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure fd_released_cb;
  int fd_released_done = 0;
  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
                    grpc_schedule_on_exec_ctx);

  gpr_log(GPR_INFO,
          "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("release_fd_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), resource_quota,
                       slice_size, "test");
  GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
            state.read_bytes, state.target_read_bytes);
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_mu_lock(g_mu);
  while (!fd_released_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
  }
  gpr_mu_unlock(g_mu);
  GPR_ASSERT(fd_released_done == 1);
  GPR_ASSERT(fd == sv[1]);
  grpc_exec_ctx_finish(&exec_ctx);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
}
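The on_fd_released closure that the second wait loop depends on mirrors write_done: it flips the flag passed in as its argument and kicks the pollset. A sketch under the same assumptions:

static void on_fd_released(grpc_exec_ctx *exec_ctx, void *arg,
                           grpc_error *error) {
  int *done = (int *)arg;
  *done = 1;
  /* Wake the worker parked in the while (!fd_released_done) loop. */
  GPR_ASSERT(
      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL)));
}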
Example #23
/* Event manager callback when reads are ready. */
static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, bool from_iocp) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index, 0};
  SOCKET sock = sp->new_socket;
  grpc_winsocket_callback_info *info = &sp->socket->read_info;
  grpc_endpoint *ep = NULL;
  struct sockaddr_storage peer_name;
  char *peer_name_string;
  char *fd_name;
  int peer_name_len = sizeof(peer_name);
  DWORD transfered_bytes;
  DWORD flags;
  BOOL wsa_success;
  int err;

  /* The general mechanism for shutting down is to queue abortion calls. While
     this is necessary in the read/write case, it's useless for the accept
     case. We only need to adjust the pending callback count */
  if (!from_iocp) {
    return;
  }

  /* The IOCP notified us of a completed operation. Let's grab the results,
     and act accordingly. */
  transfered_bytes = 0;
  wsa_success = WSAGetOverlappedResult(sock, &info->overlapped,
                                       &transfered_bytes, FALSE, &flags);
  if (!wsa_success) {
    if (sp->shutting_down) {
      /* During the shutdown case, we ARE expecting an error. So that's well,
         and we can wake up the shutdown thread. */
      decrement_active_ports_and_notify(exec_ctx, sp);
      return;
    } else {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "on_accept error: %s", utf8_message);
      gpr_free(utf8_message);
      closesocket(sock);
    }
  } else {
    if (!sp->shutting_down) {
      peer_name_string = NULL;
      err = setsockopt(sock, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
                       (char *)&sp->socket->socket, sizeof(sp->socket->socket));
      if (err) {
        char *utf8_message = gpr_format_message(WSAGetLastError());
        gpr_log(GPR_ERROR, "setsockopt error: %s", utf8_message);
        gpr_free(utf8_message);
      }
      err = getpeername(sock, (struct sockaddr *)&peer_name, &peer_name_len);
      if (!err) {
        peer_name_string = grpc_sockaddr_to_uri((struct sockaddr *)&peer_name);
      } else {
        char *utf8_message = gpr_format_message(WSAGetLastError());
        gpr_log(GPR_ERROR, "getpeername error: %s", utf8_message);
        gpr_free(utf8_message);
      }
      gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
      ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
                           peer_name_string);
      gpr_free(fd_name);
      gpr_free(peer_name_string);
    } else {
      closesocket(sock);
    }
  }

  /* The only time we should call our callback, is where we successfully
     managed to accept a connection, and created an endpoint. */
  if (ep)
    sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep,
                             &acceptor);
  /* As we were notified from the IOCP of one and exactly one accept,
     the former socket we created has now either been destroyed or assigned
     to the new connection. We need to create a new one for the next
     connection. */
  start_accept(exec_ctx, sp);
}
Example #24
/* Do a read_test, then release fd and try to read/write again. Verify that
   grpc_tcp_fd() is available before the fd is released. */
static void release_fd_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  size_t written_bytes;
  int fd;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure fd_released_cb;
  int fd_released_done = 0;
  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done);

  gpr_log(GPR_INFO, "Release fd read_test of size %d, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), slice_size, "test");
  GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (!fd_released_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
  GPR_ASSERT(fd_released_done == 1);
  GPR_ASSERT(fd == sv[1]);
  grpc_exec_ctx_finish(&exec_ctx);

  written_bytes = fill_socket_partial(sv[0], num_bytes);
  drain_socket_blocking(fd, written_bytes, written_bytes);
  written_bytes = fill_socket_partial(fd, num_bytes);
  drain_socket_blocking(sv[0], written_bytes, written_bytes);
  close(fd);
}
Example #25
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_tcp_listener *sp = arg;
  grpc_fd *fdobj;
  size_t i;

  if (!success) {
    goto error;
  }

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);
    /* TODO(ctiller): revise this when we have server-side sharding
       of channels -- we certainly should not be automatically adding every
       incoming channel to every pollset owned by the server */
    for (i = 0; i < sp->server->pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
    }
    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
Example #26
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                             grpc_endpoint **ep,
                             grpc_pollset_set *interested_parties,
                             const struct sockaddr *addr, size_t addr_len,
                             gpr_timespec deadline) {
  int fd;
  grpc_dualstack_mode dsmode;
  int err;
  async_connect *ac;
  struct sockaddr_in6 addr6_v4mapped;
  struct sockaddr_in addr4_copy;
  grpc_fd *fdobj;
  char *name;
  char *addr_str;

  *ep = NULL;

  /* Use dualstack sockets where available. */
  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
    addr = (const struct sockaddr *)&addr6_v4mapped;
    addr_len = sizeof(addr6_v4mapped);
  }

  fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
  }
  if (dsmode == GRPC_DSMODE_IPV4) {
    /* If we got an AF_INET socket, map the address back to IPv4. */
    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
    addr = (struct sockaddr *)&addr4_copy;
    addr_len = sizeof(addr4_copy);
  }
  if (!prepare_socket(addr, fd)) {
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    return;
  }

  do {
    GPR_ASSERT(addr_len < ~(socklen_t)0);
    err = connect(fd, addr, (socklen_t)addr_len);
  } while (err < 0 && errno == EINTR);

  addr_str = grpc_sockaddr_to_uri(addr);
  gpr_asprintf(&name, "tcp-client:%s", addr_str);

  fdobj = grpc_fd_create(fd, name);

  if (err >= 0) {
    *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
    grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
    goto done;
  }

  if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
    gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
    grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    goto done;
  }

  grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);

  ac = gpr_malloc(sizeof(async_connect));
  ac->closure = closure;
  ac->ep = ep;
  ac->fd = fdobj;
  ac->interested_parties = interested_parties;
  ac->addr_str = addr_str;
  addr_str = NULL;
  gpr_mu_init(&ac->mu);
  ac->refs = 2;
  ac->write_closure.cb = on_writable;
  ac->write_closure.cb_arg = ac;

  if (grpc_tcp_trace) {
    gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
            ac->addr_str);
  }

  gpr_mu_lock(&ac->mu);
  grpc_alarm_init(exec_ctx, &ac->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
  gpr_mu_unlock(&ac->mu);

done:
  gpr_free(name);
  gpr_free(addr_str);
}
Example #27
static void on_writable(grpc_exec_ctx *exec_ctx, void *acp, int success) {
  async_connect *ac = acp;
  int so_error = 0;
  socklen_t so_error_size;
  int err;
  int done;
  grpc_endpoint **ep = ac->ep;
  grpc_closure *closure = ac->closure;
  grpc_fd *fd;

  if (grpc_tcp_trace) {
    gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: success=%d",
            ac->addr_str, success);
  }

  gpr_mu_lock(&ac->mu);
  GPR_ASSERT(ac->fd);
  fd = ac->fd;
  ac->fd = NULL;
  gpr_mu_unlock(&ac->mu);

  grpc_alarm_cancel(exec_ctx, &ac->alarm);

  gpr_mu_lock(&ac->mu);
  if (success) {
    do {
      so_error_size = sizeof(so_error);
      err = getsockopt(fd->fd, SOL_SOCKET, SO_ERROR, &so_error, &so_error_size);
    } while (err < 0 && errno == EINTR);
    if (err < 0) {
      gpr_log(GPR_ERROR, "failed to connect to '%s': getsockopt(ERROR): %s",
              ac->addr_str, strerror(errno));
      goto finish;
    } else if (so_error != 0) {
      if (so_error == ENOBUFS) {
        /* We will get one of these errors if we have run out of
           memory in the kernel for the data structures allocated
           when you connect a socket.  If this happens it is very
           likely that if we wait a little bit then try again the
           connection will work (since other programs or this
           program will close their network connections and free up
           memory).  This does _not_ indicate that there is anything
           wrong with the server we are connecting to, this is a
           local problem.

           If you are looking at this code, then chances are that
           your program or another program on the same computer
           opened too many network connections.  The "easy" fix:
           don't do that! */
        gpr_log(GPR_ERROR, "kernel out of buffers");
        gpr_mu_unlock(&ac->mu);
        grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure);
        return;
      } else {
        switch (so_error) {
          case ECONNREFUSED:
            gpr_log(
                GPR_ERROR,
                "failed to connect to '%s': socket error: connection refused",
                ac->addr_str);
            break;
          default:
            gpr_log(GPR_ERROR, "failed to connect to '%s': socket error: %d",
                    ac->addr_str, so_error);
            break;
        }
        goto finish;
      }
    } else {
      grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
      *ep = grpc_tcp_create(fd, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, ac->addr_str);
      fd = NULL;
      goto finish;
    }
  } else {
    gpr_log(GPR_ERROR, "failed to connect to '%s': timeout occurred",
            ac->addr_str);
    goto finish;
  }

  GPR_UNREACHABLE_CODE(return );

finish:
  if (fd != NULL) {
    grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
    grpc_fd_orphan(exec_ctx, fd, NULL, "tcp_client_orphan");
    fd = NULL;
  }
  done = (--ac->refs == 0);
  gpr_mu_unlock(&ac->mu);
  if (done) {
    gpr_mu_destroy(&ac->mu);
    gpr_free(ac->addr_str);
    gpr_free(ac);
  }
  grpc_exec_ctx_enqueue(exec_ctx, closure, *ep != NULL);
}
Example #28
void grpc_tcp_client_connect(void (*cb)(void *arg, grpc_endpoint *ep),
                             void *arg, const struct sockaddr *addr,
                             int addr_len, gpr_timespec deadline) {
  int fd;
  grpc_dualstack_mode dsmode;
  int err;
  async_connect *ac;
  struct sockaddr_in6 addr6_v4mapped;
  struct sockaddr_in addr4_copy;

  /* Use dualstack sockets where available. */
  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
    addr = (const struct sockaddr *)&addr6_v4mapped;
    addr_len = sizeof(addr6_v4mapped);
  }

  fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
  }
  if (dsmode == GRPC_DSMODE_IPV4) {
    /* If we got an AF_INET socket, map the address back to IPv4. */
    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
    addr = (struct sockaddr *)&addr4_copy;
    addr_len = sizeof(addr4_copy);
  }
  if (!prepare_socket(addr, fd)) {
    cb(arg, NULL);
    return;
  }

  do {
    err = connect(fd, addr, addr_len);
  } while (err < 0 && errno == EINTR);

  if (err >= 0) {
    gpr_log(GPR_DEBUG, "instant connect");
    cb(arg,
       grpc_tcp_create(grpc_fd_create(fd), GRPC_TCP_DEFAULT_READ_SLICE_SIZE));
    return;
  }

  if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
    gpr_log(GPR_ERROR, "connect error: %s", strerror(errno));
    close(fd);
    cb(arg, NULL);
    return;
  }

  ac = gpr_malloc(sizeof(async_connect));
  ac->cb = cb;
  ac->cb_arg = arg;
  ac->fd = grpc_fd_create(fd);
  gpr_mu_init(&ac->mu);
  ac->refs = 2;
  ac->write_closure.cb = on_writable;
  ac->write_closure.cb_arg = ac;

  grpc_alarm_init(&ac->alarm, deadline, on_alarm, ac, gpr_now());
  grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
}
Example #29
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
                                       sp->fd_index};
  grpc_pollset *read_notifier_pollset = NULL;
  grpc_fd *fdobj;

  if (err != GRPC_ERROR_NONE) {
    goto error;
  }

  read_notifier_pollset =
      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
                               &sp->server->next_pollset_to_assign, 1) %
                           sp->server->pollset_count];

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);

    if (read_notifier_pollset == NULL) {
      gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
      goto error;
    }

    grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);

    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
        read_notifier_pollset, &acceptor);

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
Example #30
grpc_endpoint *grpc_tcp_client_create_from_fd(
    grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
    const char *addr_str) {
  return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str);
}