Example #1
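/* Build a connected client/server endpoint pair: create_sockets() produces two
   connected fds, each wrapped in a grpc_tcp endpoint (early API without fd names). */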
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(size_t read_slice_size) {
  int sv[2];
  grpc_endpoint_pair p;
  create_sockets(sv);
  p.client = grpc_tcp_create(grpc_fd_create(sv[1]), read_slice_size);
  p.server = grpc_tcp_create(grpc_fd_create(sv[0]), read_slice_size);
  return p;
}
Example #2
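/* Test fixture: wrap the two fds from create_sockets() in named TCP endpoints
   and add both to the global pollset. */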
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;

  create_sockets(sv);
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                slice_size, "test");
  grpc_endpoint_add_to_pollset(f.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(f.server_ep, &g_pollset);

  return f;
}
Example #3
/* Called when a new TCP connection request arrives in the listening port. */
static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
                      grpc_error *error) {
  server *sv = arg;
  int fd;
  int flags;
  session *se;
  struct sockaddr_storage ss;
  socklen_t slen = sizeof(ss);
  grpc_fd *listen_em_fd = sv->em_fd;

  if (error != GRPC_ERROR_NONE) {
    listen_shutdown_cb(exec_ctx, arg, 1);
    return;
  }

  fd = accept(grpc_fd_wrapped_fd(listen_em_fd), (struct sockaddr *)&ss, &slen);
  GPR_ASSERT(fd >= 0);
  GPR_ASSERT(fd < FD_SETSIZE);
  flags = fcntl(fd, F_GETFL, 0);
  fcntl(fd, F_SETFL, flags | O_NONBLOCK);
  se = gpr_malloc(sizeof(*se));
  se->sv = sv;
  se->em_fd = grpc_fd_create(fd, "listener");
  grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
  GRPC_CLOSURE_INIT(&se->session_read_closure, session_read_cb, se,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);

  grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
}
Example #4
/* Start a client to send a stream of bytes. */
static void client_start(grpc_exec_ctx *exec_ctx, client *cl, int port) {
  int fd;
  struct sockaddr_in sin;
  create_test_socket(port, &fd, &sin);
  if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
    if (errno == EINPROGRESS) {
      struct pollfd pfd;
      pfd.fd = fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;
      if (poll(&pfd, 1, -1) == -1) {
        gpr_log(GPR_ERROR, "poll() failed during connect; errno=%d", errno);
        abort();
      }
    } else {
      gpr_log(GPR_ERROR, "Failed to connect to the server (errno=%d)", errno);
      abort();
    }
  }

  cl->em_fd = grpc_fd_create(fd, "client");
  grpc_pollset_add_fd(exec_ctx, g_pollset, cl->em_fd);

  client_session_write(exec_ctx, cl, GRPC_ERROR_NONE);
}
Example #5
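/* Prepare the fd for listening; on success, append it as a server_port to the
   server's port array under the lock (ports must be added before the server starts). */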
static int add_socket_to_server(grpc_tcp_server *s, int fd,
                                const struct sockaddr *addr, int addr_len) {
  server_port *sp;
  int port;

  port = prepare_socket(fd, addr, addr_len);
  if (port >= 0) {
    gpr_mu_lock(&s->mu);
    GPR_ASSERT(!s->cb && "must add ports before starting server");
    /* append it to the list under a lock */
    if (s->nports == s->port_capacity) {
      s->port_capacity *= 2;
      s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
    }
    sp = &s->ports[s->nports++];
    sp->server = s;
    sp->fd = fd;
    sp->emfd = grpc_fd_create(fd);
    memcpy(sp->addr.untyped, addr, addr_len);
    sp->addr_len = addr_len;
    GPR_ASSERT(sp->emfd);
    gpr_mu_unlock(&s->mu);
  }

  return port;
}
Example #6
/* Called when a new TCP connection request arrives in the listening port. */
static void listen_cb(void *arg, /*=sv_arg*/
                      int success) {
  server *sv = arg;
  int fd;
  int flags;
  session *se;
  struct sockaddr_storage ss;
  socklen_t slen = sizeof(ss);
  grpc_fd *listen_em_fd = sv->em_fd;

  if (!success) {
    listen_shutdown_cb(arg, 1);
    return;
  }

  fd = accept(listen_em_fd->fd, (struct sockaddr *)&ss, &slen);
  GPR_ASSERT(fd >= 0);
  GPR_ASSERT(fd < FD_SETSIZE);
  flags = fcntl(fd, F_GETFL, 0);
  fcntl(fd, F_SETFL, flags | O_NONBLOCK);
  se = gpr_malloc(sizeof(*se));
  se->sv = sv;
  se->em_fd = grpc_fd_create(fd, "listener");
  se->session_read_closure.cb = session_read_cb;
  se->session_read_closure.cb_arg = se;
  grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure);

  grpc_fd_notify_on_read(listen_em_fd, &sv->listen_closure);
}
Example #7
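/* Same as the previous add_socket_to_server, but each listener fd is created
   with a name derived from its bound address. */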
static int add_socket_to_server(grpc_tcp_server *s, int fd,
                                const struct sockaddr *addr, size_t addr_len) {
  server_port *sp;
  int port;
  char *addr_str;
  char *name;

  port = prepare_socket(fd, addr, addr_len);
  if (port >= 0) {
    grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)addr, 1);
    gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
    gpr_mu_lock(&s->mu);
    GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
    /* append it to the list under a lock */
    if (s->nports == s->port_capacity) {
      s->port_capacity *= 2;
      s->ports = gpr_realloc(s->ports, sizeof(server_port) * s->port_capacity);
    }
    sp = &s->ports[s->nports++];
    sp->server = s;
    sp->fd = fd;
    sp->emfd = grpc_fd_create(fd, name);
    memcpy(sp->addr.untyped, addr, addr_len);
    sp->addr_len = addr_len;
    GPR_ASSERT(sp->emfd);
    gpr_mu_unlock(&s->mu);
    gpr_free(addr_str);
    gpr_free(name);
  }

  return port;
}
Example #8
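/* Build an insecure channel on top of an existing connected fd: make it
   non-blocking, wrap it in a TCP endpoint, and attach a chttp2 client transport. */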
grpc_channel *grpc_insecure_channel_create_from_fd(
    const char *target, int fd, const grpc_channel_args *args) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3,
                 (target, fd, args));

  grpc_arg default_authority_arg;
  default_authority_arg.type = GRPC_ARG_STRING;
  default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY;
  default_authority_arg.value.string = "test.authority";
  grpc_channel_args *final_args =
      grpc_channel_args_copy_and_add(args, &default_authority_arg, 1);

  int flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);

  grpc_endpoint *client = grpc_tcp_client_create_from_fd(
      &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");

  grpc_transport *transport =
      grpc_create_chttp2_transport(&exec_ctx, final_args, client, 1);
  GPR_ASSERT(transport);
  grpc_channel *channel = grpc_channel_create(
      &exec_ctx, target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
  grpc_channel_args_destroy(final_args);
  grpc_chttp2_transport_start_reading(&exec_ctx, transport, NULL);

  grpc_exec_ctx_finish(&exec_ctx);

  return channel != NULL ? channel : grpc_lame_client_channel_create(
                                         target, GRPC_STATUS_INTERNAL,
                                         "Failed to create client channel");
}
Example #9
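/* Linked-list variant of add_socket_to_server: each prepared port becomes a
   ref-counted grpc_tcp_listener pushed onto the server's list. */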
static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, int fd,
        const struct sockaddr *addr,
        size_t addr_len) {
    grpc_tcp_listener *sp = NULL;
    int port;
    char *addr_str;
    char *name;

    port = prepare_socket(fd, addr, addr_len);
    if (port >= 0) {
        grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)addr, 1);
        gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
        gpr_mu_lock(&s->mu);
        s->nports++;
        GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
        sp = gpr_malloc(sizeof(grpc_tcp_listener));
        sp->next = s->head;
        s->head = sp;
        sp->server = s;
        sp->fd = fd;
        sp->emfd = grpc_fd_create(fd, name);
        memcpy(sp->addr.untyped, addr, addr_len);
        sp->addr_len = addr_len;
        sp->port = port;
        sp->is_sibling = 0;
        sp->sibling = NULL;
        gpr_ref_init(&sp->refs, 1);
        GPR_ASSERT(sp->emfd);
        gpr_mu_unlock(&s->mu);
        gpr_free(addr_str);
        gpr_free(name);
    }

    return sp;
}
Example #10
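/* Named endpoint-pair helper: each side's fd is labelled "<name>:client" or
   "<name>:server" before being wrapped in a TCP endpoint. */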
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,
                                                   size_t read_slice_size) {
  int sv[2];
  grpc_endpoint_pair p;
  char *final_name;
  create_sockets(sv);

  gpr_asprintf(&final_name, "%s:client", name);
  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), read_slice_size,
                             "socketpair-server");
  gpr_free(final_name);
  gpr_asprintf(&final_name, "%s:server", name);
  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), read_slice_size,
                             "socketpair-client");
  gpr_free(final_name);
  return p;
}
Example #11
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #12
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
Example #13
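/* Initialize test fds: wrap the read side of each freshly created wakeup fd in
   a grpc_fd and reset its state. */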
static void init_test_fds(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                          const int num_fds) {
  for (int i = 0; i < num_fds; i++) {
    GPR_ASSERT(GRPC_ERROR_NONE == grpc_wakeup_fd_init(&tfds[i].wakeup_fd));
    tfds[i].fd = grpc_fd_create(GRPC_WAKEUP_FD_GET_READ_FD(&tfds[i].wakeup_fd),
                                "test_fd");
    reset_test_fd(exec_ctx, &tfds[i]);
  }
}
Example #14
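/* Socketpair test fixture for the exec_ctx-based API: endpoints are added to
   the pollset through an exec_ctx that is finished before returning. */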
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                slice_size, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, &g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, &g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
Example #15
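/* Socketpair test fixture for the resource-quota-aware grpc_tcp_create API;
   the local quota ref is released after both endpoints are created. */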
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("tcp_posix_test_socketpair");
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                resource_quota, slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                resource_quota, slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
Example #16
/* event manager callback when reads are ready */
static void on_read(void *arg, int success) {
  server_port *sp = arg;

  if (!success) {
    goto error;
  }

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
             strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)&addr, 1);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    sp->server->cb(sp->server->cb_arg,
                   grpc_tcp_create(grpc_fd_create(fd, name),
                                   GRPC_TCP_DEFAULT_READ_SLICE_SIZE));

    gpr_free(addr_str);
    gpr_free(name);
  }

  abort();

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_cv_broadcast(&sp->server->cv);
  }
  gpr_mu_unlock(&sp->server->mu);
}
Example #17
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  gpr_slice *slices;
  gpr_uint8 current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    if (state.write_done) {
      break;
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #18
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("large_read_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
                       slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #19
/* Insert count new listeners after listener. Every new listener will have the
   same listen address as listener (SO_REUSEPORT must be enabled). Every new
   listener is a sibling of listener. */
static grpc_error *clone_port(grpc_tcp_listener *listener, unsigned count) {
  grpc_tcp_listener *sp = NULL;
  char *addr_str;
  char *name;
  grpc_error *err;

  for (grpc_tcp_listener *l = listener->next; l && l->is_sibling; l = l->next) {
    l->fd_index += count;
  }

  for (unsigned i = 0; i < count; i++) {
    int fd = -1;
    int port = -1;
    grpc_dualstack_mode dsmode;
    err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode,
                                       &fd);
    if (err != GRPC_ERROR_NONE) return err;
    err = prepare_socket(fd, &listener->addr, true, &port);
    if (err != GRPC_ERROR_NONE) return err;
    listener->server->nports++;
    grpc_sockaddr_to_string(&addr_str, &listener->addr, 1);
    gpr_asprintf(&name, "tcp-server-listener:%s/clone-%d", addr_str, i);
    sp = gpr_malloc(sizeof(grpc_tcp_listener));
    sp->next = listener->next;
    listener->next = sp;
    /* sp (the new listener) is a sibling of 'listener' (the original
       listener). */
    sp->is_sibling = 1;
    sp->sibling = listener->sibling;
    listener->sibling = sp;
    sp->server = listener->server;
    sp->fd = fd;
    sp->emfd = grpc_fd_create(fd, name);
    memcpy(&sp->addr, &listener->addr, sizeof(grpc_resolved_address));
    sp->port = port;
    sp->port_index = listener->port_index;
    sp->fd_index = listener->fd_index + count - i;
    GPR_ASSERT(sp->emfd);
    while (listener->server->tail->next != NULL) {
      listener->server->tail = listener->server->tail->next;
    }
    gpr_free(addr_str);
    gpr_free(name);
  }

  return GRPC_ERROR_NONE;
}
Example #20
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
                       "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_iomgr_closure_init(&state.read_cb, read_cb, &state);

  switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) {
    case GRPC_ENDPOINT_DONE:
      read_cb(&state, 1);
      break;
    case GRPC_ENDPOINT_ERROR:
      read_cb(&state, 0);
      break;
    case GRPC_ENDPOINT_PENDING:
      break;
  }

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      deadline);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(ep);
}
Example #21
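/* grpc_error-returning variant: prepare the socket and, on success, append a
   named grpc_tcp_listener to the server's tail under the lock. */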
static grpc_error *add_socket_to_server(grpc_tcp_server *s, int fd,
                                        const struct sockaddr *addr,
                                        size_t addr_len, unsigned port_index,
                                        unsigned fd_index,
                                        grpc_tcp_listener **listener) {
  grpc_tcp_listener *sp = NULL;
  int port = -1;
  char *addr_str;
  char *name;

  grpc_error *err = prepare_socket(fd, addr, addr_len, s->so_reuseport, &port);
  if (err == GRPC_ERROR_NONE) {
    GPR_ASSERT(port > 0);
    grpc_sockaddr_to_string(&addr_str, (struct sockaddr *)addr, 1);
    gpr_asprintf(&name, "tcp-server-listener:%s", addr_str);
    gpr_mu_lock(&s->mu);
    s->nports++;
    GPR_ASSERT(!s->on_accept_cb && "must add ports before starting server");
    sp = gpr_malloc(sizeof(grpc_tcp_listener));
    sp->next = NULL;
    if (s->head == NULL) {
      s->head = sp;
    } else {
      s->tail->next = sp;
    }
    s->tail = sp;
    sp->server = s;
    sp->fd = fd;
    sp->emfd = grpc_fd_create(fd, name);
    memcpy(sp->addr.untyped, addr, addr_len);
    sp->addr_len = addr_len;
    sp->port = port;
    sp->port_index = port_index;
    sp->fd_index = fd_index;
    sp->is_sibling = 0;
    sp->sibling = NULL;
    GPR_ASSERT(sp->emfd);
    gpr_mu_unlock(&s->mu);
    gpr_free(addr_str);
    gpr_free(name);
  }

  *listener = sp;
  return err;
}
Example #22
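/* Threading test: start several worker threads polling a shared pollset, wake
   them through a grpc_fd-wrapped wakeup fd, then join and tear everything down. */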
static void test_threading(void) {
  threading_shared shared;
  shared.pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(shared.pollset, &shared.mu);

  gpr_thd_id thds[10];
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&thds[i], test_threading_loop, &shared, &opt);
  }
  grpc_wakeup_fd fd;
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&fd)));
  shared.wakeup_fd = &fd;
  shared.wakeup_desc = grpc_fd_create(fd.read_fd, "wakeup");
  shared.wakeups = 0;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
    grpc_fd_notify_on_read(
        &exec_ctx, shared.wakeup_desc,
        GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                          grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_first",
                               grpc_wakeup_fd_wakeup(shared.wakeup_fd)));
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_join(thds[i]);
  }
  fd.read_fd = 0;
  grpc_wakeup_fd_destroy(&fd);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
    grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, NULL, NULL,
                   false /* already_closed */, "done");
    grpc_pollset_shutdown(&exec_ctx, shared.pollset,
                          GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                              grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  gpr_free(shared.pollset);
}
Example #23
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size,
                       "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  gpr_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #24
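/* Create a workqueue: initialize its wakeup fd, name it after the workqueue
   pointer, wrap the read side in a grpc_fd, and arm the on_readable closure. */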
grpc_error *grpc_workqueue_create(grpc_exec_ctx *exec_ctx,
                                  grpc_workqueue **workqueue) {
  char name[32];
  *workqueue = gpr_malloc(sizeof(grpc_workqueue));
  gpr_ref_init(&(*workqueue)->refs, 1);
  gpr_atm_no_barrier_store(&(*workqueue)->state, 1);
  grpc_error *err = grpc_wakeup_fd_init(&(*workqueue)->wakeup_fd);
  if (err != GRPC_ERROR_NONE) {
    gpr_free(*workqueue);
    return err;
  }
  sprintf(name, "workqueue:%p", (void *)(*workqueue));
  (*workqueue)->wakeup_read_fd = grpc_fd_create(
      GRPC_WAKEUP_FD_GET_READ_FD(&(*workqueue)->wakeup_fd), name);
  gpr_mpscq_init(&(*workqueue)->queue);
  grpc_closure_init(&(*workqueue)->read_closure, on_readable, *workqueue);
  grpc_fd_notify_on_read(exec_ctx, (*workqueue)->wakeup_read_fd,
                         &(*workqueue)->read_closure);
  return GRPC_ERROR_NONE;
}
Example #25
/* Start a test server, return the TCP listening port bound to listen_fd.
   listen_cb() is registered to be interested in reading from listen_fd.
   When connection request arrives, listen_cb() is called to accept the
   connection request. */
static int server_start(server *sv) {
  int port = 0;
  int fd;
  struct sockaddr_in sin;
  socklen_t addr_len;

  create_test_socket(port, &fd, &sin);
  addr_len = sizeof(sin);
  GPR_ASSERT(bind(fd, (struct sockaddr *)&sin, addr_len) == 0);
  GPR_ASSERT(getsockname(fd, (struct sockaddr *)&sin, &addr_len) == 0);
  port = ntohs(sin.sin_port);
  GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);

  sv->em_fd = grpc_fd_create(fd, "server");
  /* Register to be interested in reading from listen_fd. */
  sv->listen_closure.cb = listen_cb;
  sv->listen_closure.cb_arg = sv;
  grpc_fd_notify_on_read(sv->em_fd, &sv->listen_closure);

  return port;
}
Example #26
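/* UDP counterpart of add_socket_to_server: append a named grpc_udp_listener
   carrying the read/write/orphan callbacks to the server's list. */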
static int add_socket_to_server(grpc_udp_server *s, int fd,
                                const grpc_resolved_address *addr,
                                grpc_udp_server_read_cb read_cb,
                                grpc_udp_server_write_cb write_cb,
                                grpc_udp_server_orphan_cb orphan_cb) {
  grpc_udp_listener *sp;
  int port;
  char *addr_str;
  char *name;

  port = prepare_socket(s->socket_factory, fd, addr);
  if (port >= 0) {
    grpc_sockaddr_to_string(&addr_str, addr, 1);
    gpr_asprintf(&name, "udp-server-listener:%s", addr_str);
    gpr_free(addr_str);
    gpr_mu_lock(&s->mu);
    s->nports++;
    sp = gpr_malloc(sizeof(grpc_udp_listener));
    sp->next = NULL;
    if (s->head == NULL) {
      s->head = sp;
    } else {
      s->tail->next = sp;
    }
    s->tail = sp;
    sp->server = s;
    sp->fd = fd;
    sp->emfd = grpc_fd_create(fd, name);
    memcpy(&sp->addr, addr, sizeof(grpc_resolved_address));
    sp->read_cb = read_cb;
    sp->write_cb = write_cb;
    sp->orphan_cb = orphan_cb;
    sp->orphan_notified = false;
    GPR_ASSERT(sp->emfd);
    gpr_mu_unlock(&s->mu);
    gpr_free(name);
  }

  return port;
}
Example #27
/* Start a test server, return the TCP listening port bound to listen_fd.
   listen_cb() is registered to be interested in reading from listen_fd.
   When connection request arrives, listen_cb() is called to accept the
   connection request. */
static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
  int port = 0;
  int fd;
  struct sockaddr_in sin;
  socklen_t addr_len;

  create_test_socket(port, &fd, &sin);
  addr_len = sizeof(sin);
  GPR_ASSERT(bind(fd, (struct sockaddr *)&sin, addr_len) == 0);
  GPR_ASSERT(getsockname(fd, (struct sockaddr *)&sin, &addr_len) == 0);
  port = ntohs(sin.sin_port);
  GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);

  sv->em_fd = grpc_fd_create(fd, "server");
  grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
  /* Register to be interested in reading from listen_fd. */
  GRPC_CLOSURE_INIT(&sv->listen_closure, listen_cb, sv,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);

  return port;
}
Example #28
/* num_fds should be an even number */
static void test_fd_init(test_fd *tfds, int *fds, int num_fds) {
  int i;
  int r;

  /* Create some dummy file descriptors. Currently using pipe file descriptors
   * for this test but we could use any other type of file descriptors. Also,
   * since pipe() used in this test creates two fds in each call, num_fds should
   * be an even number */
  GPR_ASSERT((num_fds % 2) == 0);
  for (i = 0; i < num_fds; i = i + 2) {
    r = pipe(fds + i);
    if (r != 0) {
      gpr_log(GPR_ERROR, "Error in creating pipe. %d (%s)", errno,
              strerror(errno));
      return;
    }
  }

  for (i = 0; i < num_fds; i++) {
    tfds[i].inner_fd = fds[i];
    tfds[i].fd = grpc_fd_create(fds[i], "test_fd");
  }
}
Example #29
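/* Asynchronous TCP client connect: attempt connect() on the prepared socket;
   on immediate success create the endpoint, otherwise arm a write notification
   and a deadline alarm to complete later. */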
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                             grpc_endpoint **ep,
                             grpc_pollset_set *interested_parties,
                             const struct sockaddr *addr, size_t addr_len,
                             gpr_timespec deadline) {
  int fd;
  grpc_dualstack_mode dsmode;
  int err;
  async_connect *ac;
  struct sockaddr_in6 addr6_v4mapped;
  struct sockaddr_in addr4_copy;
  grpc_fd *fdobj;
  char *name;
  char *addr_str;

  *ep = NULL;

  /* Use dualstack sockets where available. */
  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
    addr = (const struct sockaddr *)&addr6_v4mapped;
    addr_len = sizeof(addr6_v4mapped);
  }

  fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "Unable to create socket: %s", strerror(errno));
  }
  if (dsmode == GRPC_DSMODE_IPV4) {
    /* If we got an AF_INET socket, map the address back to IPv4. */
    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
    addr = (struct sockaddr *)&addr4_copy;
    addr_len = sizeof(addr4_copy);
  }
  if (!prepare_socket(addr, fd)) {
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    return;
  }

  do {
    GPR_ASSERT(addr_len < ~(socklen_t)0);
    err = connect(fd, addr, (socklen_t)addr_len);
  } while (err < 0 && errno == EINTR);

  addr_str = grpc_sockaddr_to_uri(addr);
  gpr_asprintf(&name, "tcp-client:%s", addr_str);

  fdobj = grpc_fd_create(fd, name);

  if (err >= 0) {
    *ep = grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str);
    grpc_exec_ctx_enqueue(exec_ctx, closure, 1);
    goto done;
  }

  if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
    gpr_log(GPR_ERROR, "connect error to '%s': %s", addr_str, strerror(errno));
    grpc_fd_orphan(exec_ctx, fdobj, NULL, "tcp_client_connect_error");
    grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
    goto done;
  }

  grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);

  ac = gpr_malloc(sizeof(async_connect));
  ac->closure = closure;
  ac->ep = ep;
  ac->fd = fdobj;
  ac->interested_parties = interested_parties;
  ac->addr_str = addr_str;
  addr_str = NULL;
  gpr_mu_init(&ac->mu);
  ac->refs = 2;
  ac->write_closure.cb = on_writable;
  ac->write_closure.cb_arg = ac;

  if (grpc_tcp_trace) {
    gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting",
            ac->addr_str);
  }

  gpr_mu_lock(&ac->mu);
  grpc_alarm_init(exec_ctx, &ac->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  tc_on_alarm, ac, gpr_now(GPR_CLOCK_MONOTONIC));
  grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
  gpr_mu_unlock(&ac->mu);

done:
  gpr_free(name);
  gpr_free(addr_str);
}
Example #30
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
    grpc_tcp_listener *sp = arg;
    grpc_fd *fdobj;
    size_t i;

    if (!success) {
        goto error;
    }

    /* loop until accept4 returns EAGAIN, and then re-arm notification */
    for (;;) {
        struct sockaddr_storage addr;
        socklen_t addrlen = sizeof(addr);
        char *addr_str;
        char *name;
        /* Note: If we ever decide to return this address to the user, remember to
           strip off the ::ffff:0.0.0.0/96 prefix first. */
        int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
        if (fd < 0) {
            switch (errno) {
            case EINTR:
                continue;
            case EAGAIN:
                grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
                return;
            default:
                gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
                goto error;
            }
        }

        grpc_set_socket_no_sigpipe_if_possible(fd);

        addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
        gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

        if (grpc_tcp_trace) {
            gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
        }

        fdobj = grpc_fd_create(fd, name);
        /* TODO(ctiller): revise this when we have server-side sharding
           of channels -- we certainly should not be automatically adding every
           incoming channel to every pollset owned by the server */
        for (i = 0; i < sp->server->pollset_count; i++) {
            grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
        }
        sp->server->on_accept_cb(
            exec_ctx, sp->server->on_accept_cb_arg,
            grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));

        gpr_free(name);
        gpr_free(addr_str);
    }

    GPR_UNREACHABLE_CODE(return );

error:
    gpr_mu_lock(&sp->server->mu);
    if (0 == --sp->server->active_ports) {
        gpr_mu_unlock(&sp->server->mu);
        deactivated_all_ports(exec_ctx, sp->server);
    } else {
        gpr_mu_unlock(&sp->server->mu);
    }
}