Example #1
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(void *tcpp, int from_iocp) {
  grpc_tcp *tcp = (grpc_tcp *) tcpp;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->write_info;
  grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
  grpc_endpoint_write_cb cb;
  void *opaque = tcp->write_user_data;
  int do_abort = 0;

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  if (!from_iocp || tcp->shutting_down) {
    /* If we are here with from_iocp set to true, it means we got raced to
        shutting down the endpoint. No actual abort callback will happen
        though, so we're going to do it from here. */
    do_abort = 1;
  }
  gpr_mu_unlock(&tcp->mu);

  if (do_abort) {
    if (from_iocp) {
      tcp->socket->write_info.outstanding = 0;
      gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
    }
    tcp_unref(tcp);
    if (cb) cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN);
    return;
  }

  GPR_ASSERT(tcp->socket->write_info.outstanding);

  if (info->wsa_error != 0) {
    if (info->wsa_error != WSAECONNRESET) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    status = GRPC_ENDPOINT_CB_ERROR;
  } else {
    GPR_ASSERT(info->bytes_transfered == tcp->write_slices.length);
  }

  gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
  tcp->socket->write_info.outstanding = 0;

  tcp_unref(tcp);
  cb(opaque, status);
}
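Note how on_write snapshots the user callback under the mutex, clears it, and only invokes it after unlocking, so user code never runs while the endpoint lock is held. Below is a minimal sketch of that one-shot pattern, assuming only the gpr_mu API from <grpc/support/sync.h> used throughout these examples; guarded_cb and fire_once are hypothetical names, not gRPC API:

#include <grpc/support/sync.h>

typedef void (*user_cb)(void *arg, int status);

typedef struct {
  gpr_mu mu;
  user_cb cb; /* one-shot: cleared the first time it is taken */
  void *arg;
} guarded_cb;

static void fire_once(guarded_cb *g, int status) {
  user_cb cb;
  void *arg;
  gpr_mu_lock(&g->mu);
  cb = g->cb;   /* snapshot the callback under the lock... */
  g->cb = NULL; /* ...and clear it so it fires at most once */
  arg = g->arg;
  gpr_mu_unlock(&g->mu);
  /* Invoke outside the lock: user code may re-enter the endpoint. */
  if (cb) cb(arg, status);
}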
Example #2
static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
                                              void *data, bool success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;

  if (success) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
                          &state->done_write);
      free(slices);
      return;
    }
  }

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example #3
static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
                                        gpr_slice_buffer *read_slices,
                                        grpc_iomgr_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  GPR_ASSERT(!tcp->socket->read_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_ERROR;
  }

  TCP_REF(tcp, "read");

  tcp->socket->read_info.outstanding = 1;
  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  gpr_slice_buffer_reset_and_unref(read_slices);

  tcp->read_slice = gpr_slice_malloc(8192);

  buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
  buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    int ok;
    info->bytes_transfered = bytes_read;
    ok = on_read(tcp, 1);
    TCP_UNREF(tcp, "read");
    return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      int ok;
      info->wsa_error = wsa_error;
      ok = on_read(tcp, 1);
      return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
    }
  }

  grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp);
  return GRPC_ENDPOINT_PENDING;
}
Example #4
static void send_handshake_bytes_to_peer(grpc_exec_ctx *exec_ctx,
        grpc_security_handshake *h) {
    size_t offset = 0;
    tsi_result result = TSI_OK;
    gpr_slice to_send;

    do {
        size_t to_send_size = h->handshake_buffer_size - offset;
        result = tsi_handshaker_get_bytes_to_send_to_peer(
                     h->handshaker, h->handshake_buffer + offset, &to_send_size);
        offset += to_send_size;
        if (result == TSI_INCOMPLETE_DATA) {
            h->handshake_buffer_size *= 2;
            h->handshake_buffer =
                gpr_realloc(h->handshake_buffer, h->handshake_buffer_size);
        }
    } while (result == TSI_INCOMPLETE_DATA);

    if (result != TSI_OK) {
        security_handshake_done(exec_ctx, h,
                                grpc_set_tsi_error_result(
                                    GRPC_ERROR_CREATE("Handshake failed"), result));
        return;
    }

    to_send =
        gpr_slice_from_copied_buffer((const char *)h->handshake_buffer, offset);
    gpr_slice_buffer_reset_and_unref(&h->outgoing);
    gpr_slice_buffer_add(&h->outgoing, to_send);
    /* TODO(klempner,jboeuf): This should probably use the client setup
       deadline */
    grpc_endpoint_write(exec_ctx, h->wrapped_endpoint, &h->outgoing,
                        &h->on_handshake_data_sent_to_peer);
}
Example #5
void grpc_chttp2_cleanup_writing(
    grpc_exec_ctx *exec_ctx, grpc_chttp2_transport_global *transport_global,
    grpc_chttp2_transport_writing *transport_writing) {
  grpc_chttp2_stream_writing *stream_writing;
  grpc_chttp2_stream_global *stream_global;

  while (grpc_chttp2_list_pop_written_stream(
      transport_global, transport_writing, &stream_global, &stream_writing)) {
    GPR_ASSERT(stream_global->writing_now != 0);
    if (stream_writing->send_closed != GRPC_DONT_SEND_CLOSED) {
      stream_global->write_state = GRPC_WRITE_STATE_SENT_CLOSE;
      if (!transport_global->is_client) {
        stream_global->read_closed = 1;
      }
    }
    if (stream_global->writing_now & GRPC_CHTTP2_WRITING_DATA) {
      if (stream_global->outgoing_sopb != NULL &&
          stream_global->outgoing_sopb->nops == 0) {
        GPR_ASSERT(stream_global->write_state != GRPC_WRITE_STATE_QUEUED_CLOSE);
        stream_global->outgoing_sopb = NULL;
        grpc_exec_ctx_enqueue(exec_ctx, stream_global->send_done_closure, 1);
      }
    }
    stream_global->writing_now = 0;
    grpc_chttp2_list_add_read_write_state_changed(transport_global,
                                                  stream_global);
  }
  gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf);
}
Example #6
static void handle_write(grpc_exec_ctx *exec_ctx) {
  gpr_slice slice = gpr_slice_from_copied_buffer(state.response_payload,
                                                 state.response_payload_length);

  gpr_slice_buffer_reset_and_unref(&state.outgoing_buffer);
  gpr_slice_buffer_add(&state.outgoing_buffer, slice);
  grpc_endpoint_write(exec_ctx, state.tcp, &state.outgoing_buffer, &on_write);
}
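Example #6 is the write pattern in its smallest form, and most examples here share it: gpr_slice_buffer_reset_and_unref drops whatever the previous write left in the persistent outgoing buffer, the fresh payload is added, and the buffer is handed to grpc_endpoint_write, so one buffer can serve every write for the lifetime of the connection.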
Example #7
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     gpr_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  if (tcp->shutting_down) {
    grpc_exec_ctx_sched(exec_ctx, cb,
                        GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  gpr_slice_buffer_reset_and_unref(read_slices);

  tcp->read_slice = gpr_slice_malloc(8192);

  buffer.len = (ULONG)GPR_SLICE_LENGTH(
      tcp->read_slice);  // we know slice size fits in 32bit.
  buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);

  TCP_REF(tcp, "read");

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transfered = bytes_read;
    grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
    return;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
                          GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
      return;
    }
  }

  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
Example #8
/* tcp read callback */
static void recv_data(grpc_exec_ctx *exec_ctx, void *tp, int success) {
  size_t i;
  int keep_reading = 0;
  grpc_chttp2_transport *t = tp;

  lock(t);
  i = 0;
  GPR_ASSERT(!t->parsing_active);
  if (!t->closed) {
    t->parsing_active = 1;
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
    gpr_mu_unlock(&t->mu);
    for (; i < t->read_buffer.count &&
               grpc_chttp2_perform_read(exec_ctx, &t->parsing,
                                        t->read_buffer.slices[i]);
         i++)
      ;
    gpr_mu_lock(&t->mu);
    if (i != t->read_buffer.count) {
      drop_connection(exec_ctx, t);
    }
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    t->global.concurrent_stream_count =
        (gpr_uint32)grpc_chttp2_stream_map_size(&t->parsing_stream_map);
    if (t->parsing.initial_window_update != 0) {
      grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
                                      update_global_window, t);
      t->parsing.initial_window_update = 0;
    }
    /* handle higher level things */
    grpc_chttp2_publish_reads(exec_ctx, &t->global, &t->parsing);
    t->parsing_active = 0;
  }
  if (!success || i != t->read_buffer.count) {
    drop_connection(exec_ctx, t);
    read_error_locked(exec_ctx, t);
  } else if (!t->closed) {
    keep_reading = 1;
    REF_TRANSPORT(t, "keep_reading");
    prevent_endpoint_shutdown(t);
  }
  gpr_slice_buffer_reset_and_unref(&t->read_buffer);
  unlock(exec_ctx, t);

  if (keep_reading) {
    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->recv_data);
    allow_endpoint_shutdown_unlocked(exec_ctx, t);
    UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
  } else {
    UNREF_TRANSPORT(exec_ctx, t, "recv_data");
  }
}
Example #9
static void finish_message(grpc_call *call) {
  /* TODO(ctiller): this could be a lot faster if coded directly */
  grpc_byte_buffer *byte_buffer = grpc_byte_buffer_create(
      call->incoming_message.slices, call->incoming_message.count);
  gpr_slice_buffer_reset_and_unref(&call->incoming_message);

  grpc_bbq_push(&call->incoming_queue, byte_buffer);

  GPR_ASSERT(call->incoming_message.count == 0);
  call->reading_message = 0;
}
Example #10
static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);

  if (!success) {
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(tcp, 0);
    TCP_UNREF(tcp, "read");
  } else {
    tcp_continue_read(tcp);
  }
}
Example #11
static void tcp_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     gpr_slice_buffer *incoming_buffer, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(tcp->read_cb == NULL);
  tcp->read_cb = cb;
  tcp->incoming_buffer = incoming_buffer;
  gpr_slice_buffer_reset_and_unref(incoming_buffer);
  gpr_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
  TCP_REF(tcp, "read");
  if (tcp->finished_edge) {
    tcp->finished_edge = 0;
    grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
  } else {
    grpc_exec_ctx_enqueue(exec_ctx, &tcp->read_closure, true, NULL);
  }
}
Example #12
static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
                          gpr_slice_buffer *slices, grpc_closure *cb) {
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  ep->read_cb = cb;
  ep->read_buffer = slices;
  gpr_slice_buffer_reset_and_unref(ep->read_buffer);

  SECURE_ENDPOINT_REF(ep, "read");
  if (ep->leftover_bytes.count) {
    gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
    GPR_ASSERT(ep->leftover_bytes.count == 0);
    on_read(exec_ctx, ep, GRPC_ERROR_NONE);
    return;
  }

  grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
                     &ep->on_read);
}
Example #13
static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep,
                                        gpr_slice_buffer *incoming_buffer,
                                        grpc_iomgr_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(tcp->read_cb == NULL);
  tcp->read_cb = cb;
  tcp->incoming_buffer = incoming_buffer;
  gpr_slice_buffer_reset_and_unref(incoming_buffer);
  TCP_REF(tcp, "read");
  if (tcp->finished_edge) {
    tcp->finished_edge = 0;
    grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
  } else {
    grpc_iomgr_add_delayed_callback(&tcp->read_closure, 1);
  }
  /* TODO(ctiller): immediate return */
  return GRPC_ENDPOINT_PENDING;
}
Example #14
// Callback to write the HTTP response for the CONNECT request.
static void on_write_response_done(grpc_exec_ctx* exec_ctx, void* arg,
                                   grpc_error* error) {
  proxy_connection* conn = arg;
  if (error != GRPC_ERROR_NONE) {
    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                            "HTTP proxy write response", error);
    return;
  }
  // Clear write buffer.
  gpr_slice_buffer_reset_and_unref(&conn->client_write_buffer);
  // Start reading from both client and server.  One of the read
  // requests inherits our ref to conn, but we need to take a new ref
  // for the other one.
  gpr_ref(&conn->refcount);
  grpc_endpoint_read(exec_ctx, conn->client_endpoint, &conn->client_read_buffer,
                     &conn->on_client_read_done);
  grpc_endpoint_read(exec_ctx, conn->server_endpoint, &conn->server_read_buffer,
                     &conn->on_server_read_done);
}
Example #15
static void read_and_write_test_write_handler(void *data, int success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;
  grpc_endpoint_op_status write_status;

  if (success) {
    for (;;) {
      /* Need to do inline writes until they don't succeed synchronously or we
         finish writing */
      state->bytes_written += state->current_write_size;
      if (state->target_bytes - state->bytes_written <
          state->current_write_size) {
        state->current_write_size = state->target_bytes - state->bytes_written;
      }
      if (state->current_write_size == 0) {
        break;
      }

      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      write_status = grpc_endpoint_write(state->write_ep, &state->outgoing,
                                         &state->done_write);
      free(slices);
      if (write_status == GRPC_ENDPOINT_PENDING) {
        return;
      } else if (write_status == GRPC_ENDPOINT_ERROR) {
        goto cleanup;
      }
    }
    GPR_ASSERT(state->bytes_written == state->target_bytes);
  }

cleanup:
  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example #16
// Callback for writing proxy data to the backend server.
static void on_server_write_done(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
  proxy_connection* conn = arg;
  if (error != GRPC_ERROR_NONE) {
    proxy_connection_failed(exec_ctx, conn, false /* is_client */,
                            "HTTP proxy server write", error);
    return;
  }
  // Clear write buffer (the data we just wrote).
  gpr_slice_buffer_reset_and_unref(&conn->server_write_buffer);
  // If more data was read from the client since we started this write,
  // write that data now.
  if (conn->server_deferred_write_buffer.length > 0) {
    gpr_slice_buffer_move_into(&conn->server_deferred_write_buffer,
                               &conn->server_write_buffer);
    grpc_endpoint_write(exec_ctx, conn->server_endpoint,
                        &conn->server_write_buffer,
                        &conn->on_server_write_done);
  } else {
    // No more writes.  Unref the connection.
    proxy_connection_unref(exec_ctx, conn);
  }
}
Example #17
void gpr_slice_buffer_destroy(gpr_slice_buffer *sb) {
  gpr_slice_buffer_reset_and_unref(sb);
  gpr_free(sb->slices);
}
Example #18
void gpr_slice_buffer_destroy(gpr_slice_buffer *sb) {
  gpr_slice_buffer_reset_and_unref(sb);
  if (sb->slices != sb->inlined) {
    gpr_free(sb->slices);
  }
}
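Examples #17 and #18 are two revisions of the same destroy function; one additionally checks whether the slice array points at inlined storage before freeing it. Together with the reset calls above they complete the slice-buffer lifecycle: init once, add slices, reset whenever the buffer is reused, destroy at the end. A minimal sketch of that lifecycle, assuming the public headers of this era of the tree; only functions that already appear in these examples are used:

#include <grpc/support/slice.h>
#include <grpc/support/slice_buffer.h>

static void slice_buffer_lifecycle(void) {
  gpr_slice_buffer buf;
  gpr_slice_buffer_init(&buf); /* once, before first use */

  gpr_slice_buffer_add(&buf, gpr_slice_from_copied_string("first payload"));
  /* ... hand &buf to a consumer, e.g. grpc_endpoint_write ... */

  /* Unref every queued slice and empty the buffer, keeping it usable. */
  gpr_slice_buffer_reset_and_unref(&buf);
  gpr_slice_buffer_add(&buf, gpr_slice_from_copied_string("second payload"));

  /* destroy() unrefs anything still queued and frees the slice storage. */
  gpr_slice_buffer_destroy(&buf);
}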
Example #19
// Callback to read the HTTP CONNECT request.
// TODO(roth): Technically, for any of the failure modes handled by this
// function, we should handle the error by returning an HTTP response to
// the client indicating that the request failed.  However, for the purposes
// of this test code, it's fine to pretend this is a client-side error,
// which will cause the client connection to be dropped.
static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
                                 grpc_error* error) {
  proxy_connection* conn = arg;
  if (error != GRPC_ERROR_NONE) {
    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                            "HTTP proxy read request", error);
    return;
  }
  // Read request and feed it to the parser.
  for (size_t i = 0; i < conn->client_read_buffer.count; ++i) {
    if (GPR_SLICE_LENGTH(conn->client_read_buffer.slices[i]) > 0) {
      error = grpc_http_parser_parse(&conn->http_parser,
                                     conn->client_read_buffer.slices[i], NULL);
      if (error != GRPC_ERROR_NONE) {
        proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                                "HTTP proxy request parse", error);
        GRPC_ERROR_UNREF(error);
        return;
      }
    }
  }
  gpr_slice_buffer_reset_and_unref(&conn->client_read_buffer);
  // If we're not done reading the request, read more data.
  if (conn->http_parser.state != GRPC_HTTP_BODY) {
    grpc_endpoint_read(exec_ctx, conn->client_endpoint,
                       &conn->client_read_buffer, &conn->on_read_request_done);
    return;
  }
  // Make sure we got a CONNECT request.
  if (strcmp(conn->http_request.method, "CONNECT") != 0) {
    char* msg;
    gpr_asprintf(&msg, "HTTP proxy got request method %s",
                 conn->http_request.method);
    error = GRPC_ERROR_CREATE(msg);
    gpr_free(msg);
    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                            "HTTP proxy read request", error);
    GRPC_ERROR_UNREF(error);
    return;
  }
  // Resolve address.
  grpc_resolved_addresses* resolved_addresses = NULL;
  error = grpc_blocking_resolve_address(conn->http_request.path, "80",
                                        &resolved_addresses);
  if (error != GRPC_ERROR_NONE) {
    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
                            "HTTP proxy DNS lookup", error);
    GRPC_ERROR_UNREF(error);
    return;
  }
  GPR_ASSERT(resolved_addresses->naddrs >= 1);
  // Connect to requested address.
  // The connection callback inherits our reference to conn.
  const gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(10, GPR_TIMESPAN));
  grpc_tcp_client_connect(exec_ctx, &conn->on_server_connect_done,
                          &conn->server_endpoint, conn->pollset_set,
                          (struct sockaddr*)&resolved_addresses->addrs[0].addr,
                          resolved_addresses->addrs[0].len, deadline);
  grpc_resolved_addresses_destroy(resolved_addresses);
}
Example #20
/* Initiates a write. */
static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
                                            gpr_slice *slices, size_t nslices,
                                            grpc_endpoint_write_cb cb,
                                            void *arg) {
  grpc_tcp *tcp = (grpc_tcp *) ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->outstanding_write);
  GPR_ASSERT(!tcp->shutting_down);
  tcp_ref(tcp);

  tcp->outstanding_write = 1;
  tcp->write_cb = cb;
  tcp->write_user_data = arg;

  gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);

  if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices.count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_WRITE_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices.length);
    } else {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    if (allocated) gpr_free(allocated);
    gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
    tcp->outstanding_write = 0;
    tcp_unref(tcp);
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  /* It is possible the operation completed then. But we'd still get an IOCP
     notification. So let's ignore it and wait for the IOCP. */
  if (status != 0) {
    int error = WSAGetLastError();
    if (error != WSA_IO_PENDING) {
      char *utf8_message = gpr_format_message(WSAGetLastError());
      gpr_log(GPR_ERROR, "WSASend error: %s - this means we're going to leak.",
              utf8_message);
      gpr_free(utf8_message);
    /* I'm pretty sure this is a very bad situation there. Hence the log.
       What will happen now is that the socket will neither wait for read
       or write, unless the caller retry, which is unlikely, but I am not
       sure if that's guaranteed. And there might also be a read pending.
       This means that the future orphanage of that socket will be in limbo,
       and we're going to leak it. I have no idea what could cause this
       specific case however, aside from a parameter error from our call.
       Normal read errors would actually happen during the overlapped
       operation, which is the supported way to go for that. */
      tcp->outstanding_write = 0;
      tcp_unref(tcp);
      /* Per the comment above, I'm going to treat that case as a hard failure
         for now, and leave the option to catch that and debug. */
      __debugbreak();
      return GRPC_ENDPOINT_WRITE_ERROR;
    }
  }

  /* As all is now setup, we can now ask for the IOCP notification. It may
     trigger the callback immediately however, but no matter. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_WRITE_PENDING;
}
Example #21
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, bool success) {
    grpc_call_element *elem = elemp;
    call_data *calld = elem->call_data;
    gpr_slice_buffer_reset_and_unref(&calld->slices);
    calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, success);
}
Example #22
/* tcp read callback */
static int recv_data_loop(grpc_chttp2_transport *t, int *success) {
  size_t i;
  int keep_reading = 0;

  lock(t);
  i = 0;
  GPR_ASSERT(!t->parsing_active);
  if (!t->closed) {
    t->parsing_active = 1;
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
    gpr_mu_unlock(&t->mu);
    for (; i < t->read_buffer.count &&
           grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]);
         i++)
      ;
    gpr_mu_lock(&t->mu);
    if (i != t->read_buffer.count) {
      drop_connection(t);
    }
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    t->global.concurrent_stream_count =
        grpc_chttp2_stream_map_size(&t->parsing_stream_map);
    if (t->parsing.initial_window_update != 0) {
      grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
                                      update_global_window, t);
      t->parsing.initial_window_update = 0;
    }
    /* handle higher level things */
    grpc_chttp2_publish_reads(&t->global, &t->parsing);
    t->parsing_active = 0;
  }
  if (!*success || i != t->read_buffer.count) {
    drop_connection(t);
    read_error_locked(t);
  } else if (!t->closed) {
    keep_reading = 1;
    REF_TRANSPORT(t, "keep_reading");
    prevent_endpoint_shutdown(t);
  }
  gpr_slice_buffer_reset_and_unref(&t->read_buffer);
  unlock(t);

  if (keep_reading) {
    int ret = -1;
    switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) {
      case GRPC_ENDPOINT_DONE:
        *success = 1;
        ret = 1;
        break;
      case GRPC_ENDPOINT_ERROR:
        *success = 0;
        ret = 1;
        break;
      case GRPC_ENDPOINT_PENDING:
        ret = 0;
        break;
    }
    allow_endpoint_shutdown_unlocked(t);
    UNREF_TRANSPORT(t, "keep_reading");
    return ret;
  } else {
    UNREF_TRANSPORT(t, "recv_data");
    return 0;
  }

  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
Example #23
/* Initiates a write. */
static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
                                            gpr_slice *slices, size_t nslices,
                                            grpc_endpoint_write_cb cb,
                                            void *arg) {
  grpc_tcp *tcp = (grpc_tcp *) ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->socket->write_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_WRITE_ERROR;
  }
  tcp_ref(tcp);

  tcp->socket->write_info.outstanding = 1;
  tcp->write_cb = cb;
  tcp->write_user_data = arg;

  gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);

  if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *) gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices.count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_WRITE_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices.length);
    } else {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    if (allocated) gpr_free(allocated);
    gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
    tcp->socket->write_info.outstanding = 0;
    tcp_unref(tcp);
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
      tcp->socket->write_info.outstanding = 0;
      tcp_unref(tcp);
      return GRPC_ENDPOINT_WRITE_ERROR;
    }
  }

  /* As all is now setup, we can now ask for the IOCP notification. It may
     trigger the callback immediately however, but no matter. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_WRITE_PENDING;
}
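This revision of win_write (compare Example #20) handles a failed overlapped WSASend that is not WSA_IO_PENDING by resetting the write slices, clearing the outstanding flag, and unreffing before returning GRPC_ENDPOINT_WRITE_ERROR, instead of logging a likely leak and hitting __debugbreak().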
Example #24
static void tcp_continue_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(!tcp->finished_edge);
  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  GPR_TIMER_BEGIN("tcp_continue_read", 0);

  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
                                 gpr_slice_malloc(tcp->slice_size));
  }
  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GPR_TIMER_BEGIN("recvmsg", 1);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GPR_TIMER_END("recvmsg", 0);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      /* We've consumed the edge, request a new one */
      grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
    } else {
      /* TODO(klempner): Log interesting errors */
      gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
      call_read_cb(exec_ctx, tcp, 0);
      TCP_UNREF(exec_ctx, tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(exec_ctx, tcp, 0);
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
      gpr_slice_buffer_trim_end(
          tcp->incoming_buffer,
          tcp->incoming_buffer->length - (size_t)read_bytes,
          &tcp->last_read_buffer);
    } else if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(exec_ctx, tcp, 1);
    TCP_UNREF(exec_ctx, tcp, "read");
  }

  GPR_TIMER_END("tcp_continue_read", 0);
}
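Note the adaptive sizing in tcp_continue_read: on EAGAIN the iovec count is halved, and a read that fills the entire buffer grows it by one (up to MAX_READ_IOVEC), so the amount of memory staged per recvmsg call tracks what the socket has actually been delivering.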
Example #25
static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
                           gpr_slice_buffer *slices, grpc_closure *cb) {
  unsigned i;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  uint8_t *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);

  gpr_slice_buffer_reset_and_unref(&ep->output_buffer);

  if (false && grpc_trace_secure_endpoint) {
    for (i = 0; i < slices->count; i++) {
      char *data =
          gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < slices->count; i++) {
    gpr_slice plain = slices->slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    size_t still_pending_size;
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    grpc_exec_ctx_sched(
        exec_ctx, cb,
        grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
        NULL);
    return;
  }

  grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
}
Example #26
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
                    grpc_error *error) {
  unsigned i;
  uint8_t keep_looping = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  if (error != GRPC_ERROR_NONE) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
                                   "Secure read failed", &error, 1));
    return;
  }

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < ep->source_buffer.count; i++) {
    gpr_slice encrypted = ep->source_buffer.slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        ep->read_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  gpr_slice_buffer_reset_and_unref(&ep->source_buffer);

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, grpc_set_tsi_error_result(
                                   GRPC_ERROR_CREATE("Unwrap failed"), result));
    return;
  }

  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
Example #27
static void test_strsplit(void) {
  gpr_slice_buffer *parts;
  gpr_slice str;

  LOG_TEST_NAME("test_strsplit");

  parts = gpr_malloc(sizeof(gpr_slice_buffer));
  gpr_slice_buffer_init(parts);

  str = gpr_slice_from_copied_string("one, two, three, four");
  gpr_slice_split(str, ", ", parts);
  GPR_ASSERT(4 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], "one"));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[1], "two"));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[2], "three"));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[3], "four"));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  /* separator not present in string */
  str = gpr_slice_from_copied_string("one two three four");
  gpr_slice_split(str, ", ", parts);
  GPR_ASSERT(1 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], "one two three four"));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  /* separator at the end */
  str = gpr_slice_from_copied_string("foo,");
  gpr_slice_split(str, ",", parts);
  GPR_ASSERT(2 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], "foo"));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[1], ""));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  /* separator at the beginning */
  str = gpr_slice_from_copied_string(",foo");
  gpr_slice_split(str, ",", parts);
  GPR_ASSERT(2 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], ""));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[1], "foo"));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  /* standalone separator */
  str = gpr_slice_from_copied_string(",");
  gpr_slice_split(str, ",", parts);
  GPR_ASSERT(2 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], ""));
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[1], ""));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  /* empty input */
  str = gpr_slice_from_copied_string("");
  gpr_slice_split(str, ", ", parts);
  GPR_ASSERT(1 == parts->count);
  GPR_ASSERT(0 == gpr_slice_str_cmp(parts->slices[0], ""));
  gpr_slice_buffer_reset_and_unref(parts);
  gpr_slice_unref(str);

  gpr_slice_buffer_destroy(parts);
  gpr_free(parts);
}
Example #28
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status error) {
  unsigned i;
  gpr_uint8 keep_looping = 0;
  size_t input_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < nslices; i++) {
    gpr_slice encrypted = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        &ep->input_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
    call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
    return;
  }
  /* The upper level will unref the slices. */
  input_buffer_count = ep->input_buffer.count;
  ep->input_buffer.count = 0;
  call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
}
Example #29
/** Filter's "main" function, called for any incoming grpc_transport_stream_op
 * instance that holds a non-zero number of send operations, accessible to this
 * function in \a send_ops.  */
static void process_send_ops(grpc_call_element *elem,
                             grpc_stream_op_buffer *send_ops) {
  call_data *calld = elem->call_data;
  channel_data *channeld = elem->channel_data;
  size_t i;
  int did_compress = 0;

  /* In streaming calls, we need to reset the previously accumulated slices */
  gpr_slice_buffer_reset_and_unref(&calld->slices);
  for (i = 0; i < send_ops->nops; ++i) {
    grpc_stream_op *sop = &send_ops->ops[i];
    switch (sop->type) {
      case GRPC_OP_BEGIN_MESSAGE:
        /* buffer up slices until we've processed all the expected ones (as
         * given by GRPC_OP_BEGIN_MESSAGE) */
        calld->remaining_slice_bytes = sop->data.begin_message.length;
        if (sop->data.begin_message.flags & GRPC_WRITE_NO_COMPRESS) {
          calld->has_compression_algorithm = 1; /* GPR_TRUE */
          calld->compression_algorithm = GRPC_COMPRESS_NONE;
        }
        break;
      case GRPC_OP_METADATA:
        if (!calld->written_initial_metadata) {
          /* Parse incoming request for compression. If any, it'll be available
           * at calld->compression_algorithm */
          grpc_metadata_batch_filter(&(sop->data.metadata),
                                     compression_md_filter, elem);
          if (!calld->has_compression_algorithm) {
            /* If no algorithm was found in the metadata and we aren't
             * exceptionally skipping compression, fall back to the channel
             * default */
            calld->compression_algorithm =
                channeld->default_compression_algorithm;
            calld->has_compression_algorithm = 1; /* GPR_TRUE */
          }
          /* hint compression algorithm */
          grpc_metadata_batch_add_tail(
              &(sop->data.metadata), &calld->compression_algorithm_storage,
              GRPC_MDELEM_REF(channeld->mdelem_compression_algorithms
                                  [calld->compression_algorithm]));

          /* convey supported compression algorithms */
          grpc_metadata_batch_add_tail(
              &(sop->data.metadata), &calld->accept_encoding_storage,
              GRPC_MDELEM_REF(channeld->mdelem_accept_encoding));

          calld->written_initial_metadata = 1; /* GPR_TRUE */
        }
        break;
      case GRPC_OP_SLICE:
        if (skip_compression(channeld, calld)) continue;
        GPR_ASSERT(calld->remaining_slice_bytes > 0);
        /* Increase input ref count, gpr_slice_buffer_add takes ownership.  */
        gpr_slice_buffer_add(&calld->slices, gpr_slice_ref(sop->data.slice));
        GPR_ASSERT(GPR_SLICE_LENGTH(sop->data.slice) >=
                   calld->remaining_slice_bytes);
        calld->remaining_slice_bytes -=
            (gpr_uint32)GPR_SLICE_LENGTH(sop->data.slice);
        if (calld->remaining_slice_bytes == 0) {
          did_compress =
              compress_send_sb(calld->compression_algorithm, &calld->slices);
        }
        break;
      case GRPC_NO_OP:
        break;
    }
  }

  /* Modify the send_ops stream_op_buffer depending on whether compression was
   * carried out */
  if (did_compress) {
    finish_compressed_sopb(send_ops, elem);
  }
}
Example #30
static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  unsigned i;
  size_t output_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
  grpc_endpoint_write_status status;
  GPR_ASSERT(ep->output_buffer.count == 0);

  if (grpc_trace_secure_endpoint) {
    for (i = 0; i < nslices; i++) {
      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice plain = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    size_t still_pending_size;
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    return GRPC_ENDPOINT_WRITE_ERROR;
  }

  /* clear output_buffer and let the lower level handle its slices. */
  output_buffer_count = ep->output_buffer.count;
  ep->output_buffer.count = 0;
  ep->write_cb = cb;
  ep->write_user_data = user_data;
  /* Need to keep the endpoint alive across a transport */
  secure_endpoint_ref(ep);
  status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
                               output_buffer_count, on_write, ep);
  if (status != GRPC_ENDPOINT_WRITE_PENDING) {
    secure_endpoint_unref(ep);
  }
  return status;
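The older endpoint API seen here passes a raw slices/nslices array and transfers slice ownership to the callee, which is why endpoint_write zeroes output_buffer.count rather than calling gpr_slice_buffer_reset_and_unref (Example #28 performs the mirror-image handoff on the read side); the newer API in the earlier examples instead passes a gpr_slice_buffer that the caller resets and reuses once the write completes.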
}