Example 1
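/* Write-completion handler for the read/write test: on success it adds the
   bytes just written, clamps the next write size to what is still owed,
   refills the outgoing buffer with gpr_slice_buffer_addn and issues the next
   grpc_endpoint_write; once everything is written (or on failure) it records
   write_done and kicks the pollset. */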
static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
                                              void *data, bool success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;

  if (success) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
                          &state->done_write);
      gpr_free(slices);
      return;
    }
  }

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example 2
void grpc_chttp2_transport_start_reading(grpc_transport *transport,
                                         gpr_slice *slices, size_t nslices) {
  grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport;
  REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */
  gpr_slice_buffer_addn(&t->read_buffer, slices, nslices);
  recv_data(t, 1);
}
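A minimal caller sketch for the function above; start_transport_with_leftovers, its parameters, and the calling context are illustrative assumptions, not gRPC API:

/* Hypothetical helper: hand any bytes already read during connection setup
   to the transport. Ownership of the slices passes to t->read_buffer via
   gpr_slice_buffer_addn, as shown in the body above. */
static void start_transport_with_leftovers(grpc_transport *transport,
                                           gpr_slice *leftover_slices,
                                           size_t num_leftovers) {
  grpc_chttp2_transport_start_reading(transport, leftover_slices,
                                      num_leftovers);
}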
Example 3
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
Example 4
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  gpr_slice *slices;
  gpr_uint8 current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    if (state.write_done) {
      break;
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example 5
void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
  /* anything to move? */
  if (src->count == 0) {
    return;
  }
  /* anything in dst? */
  if (dst->count == 0) {
    gpr_slice_buffer_swap(src, dst);
    return;
  }
  /* both buffers have data - copy, and reset src */
  gpr_slice_buffer_addn(dst, src->slices, src->count);
  src->count = 0;
  src->length = 0;
}
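A minimal, self-contained usage sketch for the hand-off above; the function name and string contents are illustrative, and the headers assume the gpr_slice era of the API shown in these examples:

#include <grpc/support/log.h>
#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>

/* Illustrative only: stage two slices, then hand them off. Because `pending`
   is empty here the buffers are simply swapped; had it held data the slices
   would be appended with gpr_slice_buffer_addn and `staging` reset. */
static void slice_buffer_handoff_example(void) {
  gpr_slice_buffer staging;
  gpr_slice_buffer pending;
  gpr_slice_buffer_init(&staging);
  gpr_slice_buffer_init(&pending);

  gpr_slice_buffer_add(&staging, gpr_slice_from_copied_string("hello "));
  gpr_slice_buffer_add(&staging, gpr_slice_from_copied_string("world"));

  gpr_slice_buffer_move_into(&staging, &pending);
  GPR_ASSERT(staging.count == 0);
  GPR_ASSERT(pending.count == 2);

  gpr_slice_buffer_destroy(&staging);
  gpr_slice_buffer_destroy(&pending);
}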
Example 6
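/* Variant of the handler in Example 1 for the status-returning endpoint API:
   it loops issuing writes while grpc_endpoint_write completes synchronously,
   returns early when a write goes asynchronous (GRPC_ENDPOINT_PENDING), and
   falls through to the cleanup path on error or completion. */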
static void read_and_write_test_write_handler(void *data, int success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;
  grpc_endpoint_op_status write_status;

  if (success) {
    for (;;) {
      /* Need to do inline writes until they don't succeed synchronously or we
         finish writing */
      state->bytes_written += state->current_write_size;
      if (state->target_bytes - state->bytes_written <
          state->current_write_size) {
        state->current_write_size = state->target_bytes - state->bytes_written;
      }
      if (state->current_write_size == 0) {
        break;
      }

      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      write_status = grpc_endpoint_write(state->write_ep, &state->outgoing,
                                         &state->done_write);
      gpr_free(slices);
      if (write_status == GRPC_ENDPOINT_PENDING) {
        return;
      } else if (write_status == GRPC_ENDPOINT_ERROR) {
        goto cleanup;
      }
    }
    GPR_ASSERT(state->bytes_written == state->target_bytes);
  }

cleanup:
  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example 7
/* Initiates a write. */
static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
                                            gpr_slice *slices, size_t nslices,
                                            grpc_endpoint_write_cb cb,
                                            void *arg) {
  grpc_tcp *tcp = (grpc_tcp *) ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->outstanding_write);
  GPR_ASSERT(!tcp->shutting_down);
  tcp_ref(tcp);

  tcp->outstanding_write = 1;
  tcp->write_cb = cb;
  tcp->write_user_data = arg;

  gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);

  if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices.count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_WRITE_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices.length);
    } else {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    if (allocated) gpr_free(allocated);
    gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
    tcp->outstanding_write = 0;
    tcp_unref(tcp);
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  /* It is possible the operation completed then. But we'd still get an IOCP
     notification. So let's ignore it and wait for the IOCP. */
  if (status != 0) {
    int error = WSAGetLastError();
    if (error != WSA_IO_PENDING) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "WSASend error: %s - this means we're going to leak.",
              utf8_message);
      gpr_free(utf8_message);
      /* I'm pretty sure this is a very bad situation. Hence the log.
         What will happen now is that the socket will neither wait for read
         nor write, unless the caller retries, which is unlikely, and even
         then it is not guaranteed. There might also be a read pending.
         This means that the future orphanage of that socket will be in limbo,
         and we're going to leak it. I have no idea what could cause this
         specific case, however, aside from a parameter error in our call.
         Normal read errors would actually happen during the overlapped
         operation, which is the supported way to go for that. */
      tcp->outstanding_write = 0;
      tcp_unref(tcp);
      /* Per the comment above, I'm going to treat that case as a hard failure
         for now, and leave the option to catch that and debug. */
      __debugbreak();
      return GRPC_ENDPOINT_WRITE_ERROR;
    }
  }

  /* Now that everything is set up, we can ask for the IOCP notification. It
     may trigger the callback immediately, but that is fine. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_WRITE_PENDING;
}
Example 8
/* Initiates a write. */
static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
                                            gpr_slice *slices, size_t nslices,
                                            grpc_endpoint_write_cb cb,
                                            void *arg) {
  grpc_tcp *tcp = (grpc_tcp *) ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->socket->write_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_WRITE_ERROR;
  }
  tcp_ref(tcp);

  tcp->socket->write_info.outstanding = 1;
  tcp->write_cb = cb;
  tcp->write_user_data = arg;

  gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);

  if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices.count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_WRITE_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices.length);
    } else {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    if (allocated) gpr_free(allocated);
    gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
    tcp->socket->write_info.outstanding = 0;
    tcp_unref(tcp);
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
      tcp->socket->write_info.outstanding = 0;
      tcp_unref(tcp);
      return GRPC_ENDPOINT_WRITE_ERROR;
    }
  }

  /* Now that everything is set up, we can ask for the IOCP notification. It
     may trigger the callback immediately, but that is fine. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_WRITE_PENDING;
}
Example 9
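/* Endpoint-read callback for the security handshake: feeds the received
   slices to the TSI handshaker, reads more data or sends the next handshake
   bytes while the handshake is still in progress, and on success stashes any
   unconsumed bytes in h->left_overs before checking the peer. */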
static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
                                                 void *handshake,
                                                 grpc_error *error) {
  grpc_security_handshake *h = handshake;
  size_t consumed_slice_size = 0;
  tsi_result result = TSI_OK;
  size_t i;
  size_t num_left_overs;
  int has_left_overs_in_current_slice = 0;

  if (error != GRPC_ERROR_NONE) {
    security_handshake_done(
        exec_ctx, h,
        GRPC_ERROR_CREATE_REFERENCING("Handshake read failed", &error, 1));
    return;
  }

  for (i = 0; i < h->incoming.count; i++) {
    consumed_slice_size = GPR_SLICE_LENGTH(h->incoming.slices[i]);
    result = tsi_handshaker_process_bytes_from_peer(
        h->handshaker, GPR_SLICE_START_PTR(h->incoming.slices[i]),
        &consumed_slice_size);
    if (!tsi_handshaker_is_in_progress(h->handshaker)) break;
  }

  if (tsi_handshaker_is_in_progress(h->handshaker)) {
    /* We may need more data. */
    if (result == TSI_INCOMPLETE_DATA) {
      grpc_endpoint_read(exec_ctx, h->wrapped_endpoint, &h->incoming,
                         &h->on_handshake_data_received_from_peer);
      return;
    } else {
      send_handshake_bytes_to_peer(exec_ctx, h);
      return;
    }
  }

  if (result != TSI_OK) {
    security_handshake_done(
        exec_ctx, h,
        grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Handshake failed"),
                                  result));
    return;
  }

  /* Handshake is done and successful at this point. */
  has_left_overs_in_current_slice =
      (consumed_slice_size < GPR_SLICE_LENGTH(h->incoming.slices[i]));
  num_left_overs =
      (has_left_overs_in_current_slice ? 1 : 0) + h->incoming.count - i - 1;
  if (num_left_overs == 0) {
    check_peer(exec_ctx, h);
    return;
  }

  /* Put the leftovers in our buffer (ownership transferred). */
  if (has_left_overs_in_current_slice) {
    gpr_slice_buffer_add(
        &h->left_overs,
        gpr_slice_split_tail(&h->incoming.slices[i], consumed_slice_size));
    gpr_slice_unref(
        h->incoming.slices[i]); /* split_tail above increments refcount. */
  }
  gpr_slice_buffer_addn(
      &h->left_overs, &h->incoming.slices[i + 1],
      num_left_overs - (size_t)has_left_overs_in_current_slice);
  check_peer(exec_ctx, h);
}
Example 10
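/* Older, callback-style variant of the handler in Example 9: the incoming
   data arrives as a raw slice array plus count, so every exit path must
   release the slices explicitly via cleanup_slices. */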
static void on_handshake_data_received_from_peer(
    void *setup, gpr_slice *slices, size_t nslices,
    grpc_endpoint_cb_status error) {
  grpc_secure_transport_setup *s = setup;
  size_t consumed_slice_size = 0;
  tsi_result result = TSI_OK;
  size_t i;
  size_t num_left_overs;
  int has_left_overs_in_current_slice = 0;

  if (error != GRPC_ENDPOINT_CB_OK) {
    gpr_log(GPR_ERROR, "Read failed.");
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  for (i = 0; i < nslices; i++) {
    consumed_slice_size = GPR_SLICE_LENGTH(slices[i]);
    result = tsi_handshaker_process_bytes_from_peer(
        s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size);
    if (!tsi_handshaker_is_in_progress(s->handshaker)) break;
  }

  if (tsi_handshaker_is_in_progress(s->handshaker)) {
    /* We may need more data. */
    if (result == TSI_INCOMPLETE_DATA) {
      /* TODO(klempner,jboeuf): This should probably use the client setup
         deadline */
      grpc_endpoint_notify_on_read(s->endpoint,
                                   on_handshake_data_received_from_peer, setup);
      cleanup_slices(slices, nslices);
      return;
    } else {
      send_handshake_bytes_to_peer(s);
      cleanup_slices(slices, nslices);
      return;
    }
  }

  if (result != TSI_OK) {
    gpr_log(GPR_ERROR, "Handshake failed with error %s",
            tsi_result_to_string(result));
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  /* Handshake is done and successful at this point. */
  has_left_overs_in_current_slice =
      (consumed_slice_size < GPR_SLICE_LENGTH(slices[i]));
  num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1;
  if (num_left_overs == 0) {
    cleanup_slices(slices, nslices);
    check_peer(s);
    return;
  }
  cleanup_slices(slices, nslices - num_left_overs);

  /* Put the leftovers in our buffer (ownership transferred). */
  if (has_left_overs_in_current_slice) {
    gpr_slice_buffer_add(&s->left_overs,
                         gpr_slice_split_tail(&slices[i], consumed_slice_size));
    gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */
  }
  gpr_slice_buffer_addn(&s->left_overs, &slices[i + 1],
                        num_left_overs - has_left_overs_in_current_slice);
  check_peer(s);
}