Example #1
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  if (tcp->shutting_down) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, cb,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "TCP socket is shutting down", &tcp->shutdown_error, 1));
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);

  tcp->read_slice = GRPC_SLICE_MALLOC(8192);

  buffer.len = (ULONG)GRPC_SLICE_LENGTH(
      tcp->read_slice);  // we know slice size fits in 32bit.
  buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);

  TCP_REF(tcp, "read");

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately ? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transfered = bytes_read;
    GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
    return;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
                         GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
      return;
    }
  }

  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
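The function above tries an immediate non-blocking read and only queues an overlapped read when no data is ready. Below is a minimal stand-alone sketch of that same two-phase Win32 pattern, detached from gRPC's slice and refcount machinery; the function name and return convention are illustrative assumptions, not gRPC API.

#include <string.h>
#include <winsock2.h>

/* Sketch only: assumes `s` is a non-blocking socket associated with an I/O
   completion port, so overlapped completions are reported elsewhere. */
static int start_read(SOCKET s, char *buf, ULONG len, OVERLAPPED *ov) {
  WSABUF wsabuf;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  wsabuf.len = len;
  wsabuf.buf = buf;

  /* Phase 1: synchronous, non-blocking attempt. */
  if (WSARecv(s, &wsabuf, 1, &bytes_read, &flags, NULL, NULL) == 0) {
    return (int)bytes_read; /* data was already available */
  }
  if (WSAGetLastError() != WSAEWOULDBLOCK) {
    return -1; /* hard error */
  }

  /* Phase 2: queue an asynchronous (overlapped) read. */
  memset(ov, 0, sizeof(*ov));
  if (WSARecv(s, &wsabuf, 1, &bytes_read, &flags, ov, NULL) != 0 &&
      WSAGetLastError() != WSA_IO_PENDING) {
    return -1; /* failed to queue */
  }
  return 0; /* read is pending; completion arrives via the IOCP */
}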
Example #2
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  security_handshaker *h = arg;
  gpr_mu_lock(&h->mu);
  if (error != GRPC_ERROR_NONE || h->shutdown) {
    security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
    goto done;
  }
  // Create frame protector.
  tsi_frame_protector *protector;
  tsi_result result = tsi_handshaker_result_create_frame_protector(
      h->handshaker_result, NULL, &protector);
  if (result != TSI_OK) {
    error = grpc_set_tsi_error_result(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"),
        result);
    security_handshake_failed_locked(exec_ctx, h, error);
    goto done;
  }
  // Get unused bytes.
  unsigned char *unused_bytes = NULL;
  size_t unused_bytes_size = 0;
  result = tsi_handshaker_result_get_unused_bytes(
      h->handshaker_result, &unused_bytes, &unused_bytes_size);
  // Create secure endpoint.
  if (unused_bytes_size > 0) {
    grpc_slice slice =
        grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size);
    h->args->endpoint =
        grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1);
    grpc_slice_unref_internal(exec_ctx, slice);
  } else {
    h->args->endpoint =
        grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0);
  }
  tsi_handshaker_result_destroy(h->handshaker_result);
  h->handshaker_result = NULL;
  // Clear out the read buffer before it gets passed to the transport.
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer);
  // Add auth context to channel args.
  grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context);
  grpc_channel_args *tmp_args = h->args->args;
  h->args->args =
      grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
  grpc_channel_args_destroy(exec_ctx, tmp_args);
  // Invoke callback.
  GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
  // Set shutdown to true so that subsequent calls to
  // security_handshaker_shutdown() do nothing.
  h->shutdown = true;
done:
  gpr_mu_unlock(&h->mu);
  security_handshaker_unref(exec_ctx, h);
}
Example #3
static void endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
                          grpc_slice_buffer *slices, grpc_closure *cb) {
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  ep->read_cb = cb;
  ep->read_buffer = slices;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);

  SECURE_ENDPOINT_REF(ep, "read");
  if (ep->leftover_bytes.count) {
    grpc_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
    GPR_ASSERT(ep->leftover_bytes.count == 0);
    on_read(exec_ctx, ep, GRPC_ERROR_NONE);
    return;
  }

  grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
                     &ep->on_read);
}
Example #4
static grpc_error *on_handshake_next_done_locked(
    grpc_exec_ctx *exec_ctx, security_handshaker *h, tsi_result result,
    const unsigned char *bytes_to_send, size_t bytes_to_send_size,
    tsi_handshaker_result *handshaker_result) {
  grpc_error *error = GRPC_ERROR_NONE;
  // Read more if we need to.
  if (result == TSI_INCOMPLETE_DATA) {
    GPR_ASSERT(bytes_to_send_size == 0);
    grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
                       &h->on_handshake_data_received_from_peer);
    return error;
  }
  if (result != TSI_OK) {
    return grpc_set_tsi_error_result(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result);
  }
  // Update handshaker result.
  if (handshaker_result != NULL) {
    GPR_ASSERT(h->handshaker_result == NULL);
    h->handshaker_result = handshaker_result;
  }
  if (bytes_to_send_size > 0) {
    // Send data to peer, if needed.
    grpc_slice to_send = grpc_slice_from_copied_buffer(
        (const char *)bytes_to_send, bytes_to_send_size);
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing);
    grpc_slice_buffer_add(&h->outgoing, to_send);
    grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing,
                        &h->on_handshake_data_sent_to_peer);
  } else if (handshaker_result == NULL) {
    // There is nothing to send, but need to read from peer.
    grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
                       &h->on_handshake_data_received_from_peer);
  } else {
    // Handshake has finished, check peer and so on.
    error = check_peer_locked(exec_ctx, h);
  }
  return error;
}
Example #5
File: tcp_uv.c  Project: yugui/grpc
static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                             grpc_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  int status;
  grpc_error *error = GRPC_ERROR_NONE;
  GPR_ASSERT(tcp->read_cb == NULL);
  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
  TCP_REF(tcp, "read");
  // TODO(murgatroid99): figure out what the return value here means
  status =
      uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
  if (status != 0) {
    error = GRPC_ERROR_CREATE("TCP Read failed at start");
    error =
        grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
    grpc_closure_sched(exec_ctx, cb, error);
  }
  if (grpc_tcp_trace) {
    const char *str = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp, str);
  }
}
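The uv_read_start() call above hands libuv an allocation callback and a read callback. Below is a minimal stand-alone sketch of that callback pair; it is not gRPC code, and alloc_cb / read_cb are illustrative names.

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

/* libuv asks for a buffer before each read... */
static void alloc_cb(uv_handle_t *handle, size_t suggested_size,
                     uv_buf_t *buf) {
  (void)handle;
  buf->base = malloc(suggested_size); /* we own this; freed in read_cb */
  buf->len = suggested_size;
}

/* ...and then delivers data, EOF, or an error to the read callback. */
static void read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) {
  if (nread > 0) {
    fwrite(buf->base, 1, (size_t)nread, stdout); /* consume the bytes */
  } else if (nread < 0) {
    /* nread is UV_EOF or a negative errno-style code. */
    fprintf(stderr, "read error: %s\n", uv_strerror((int)nread));
    uv_read_stop(stream);
  }
  free(buf->base);
}

/* Usage: uv_read_start((uv_stream_t *)&tcp_handle, alloc_cb, read_cb); */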
Example #6
// Callback invoked for reading HTTP CONNECT response.
static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
                         grpc_error* error) {
  http_connect_handshaker* handshaker = arg;
  gpr_mu_lock(&handshaker->mu);
  if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
    // If the read failed or we're shutting down, clean up and invoke the
    // callback with the error.
    handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
    goto done;
  }
  // Add buffer to parser.
  for (size_t i = 0; i < handshaker->args->read_buffer->count; ++i) {
    if (GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i]) > 0) {
      size_t body_start_offset = 0;
      error = grpc_http_parser_parse(&handshaker->http_parser,
                                     handshaker->args->read_buffer->slices[i],
                                     &body_start_offset);
      if (error != GRPC_ERROR_NONE) {
        handshake_failed_locked(exec_ctx, handshaker, error);
        goto done;
      }
      if (handshaker->http_parser.state == GRPC_HTTP_BODY) {
        // Remove the data we've already read from the read buffer,
        // leaving only the leftover bytes (if any).
        grpc_slice_buffer tmp_buffer;
        grpc_slice_buffer_init(&tmp_buffer);
        if (body_start_offset <
            GRPC_SLICE_LENGTH(handshaker->args->read_buffer->slices[i])) {
          grpc_slice_buffer_add(
              &tmp_buffer,
              grpc_slice_split_tail(&handshaker->args->read_buffer->slices[i],
                                    body_start_offset));
        }
        grpc_slice_buffer_addn(&tmp_buffer,
                               &handshaker->args->read_buffer->slices[i + 1],
                               handshaker->args->read_buffer->count - i - 1);
        grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer);
        grpc_slice_buffer_destroy_internal(exec_ctx, &tmp_buffer);
        break;
      }
    }
  }
  // If we're not done reading the response, read more data.
  // TODO(roth): In practice, I suspect that the response to a CONNECT
  // request will never include a body, in which case this check is
  // sufficient.  However, the language of RFC-2817 doesn't explicitly
  // forbid the response from including a body.  If there is a body,
  // it's possible that we might have parsed part but not all of the
  // body, in which case this check will cause us to fail to parse the
  // remainder of the body.  If that ever becomes an issue, we may
  // need to fix the HTTP parser to understand when the body is
  // complete (e.g., handling chunked transfer encoding or looking
  // at the Content-Length: header).
  if (handshaker->http_parser.state != GRPC_HTTP_BODY) {
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
                                               handshaker->args->read_buffer);
    grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
                       handshaker->args->read_buffer,
                       &handshaker->response_read_closure);
    gpr_mu_unlock(&handshaker->mu);
    return;
  }
  // Make sure we got a 2xx response.
  if (handshaker->http_response.status < 200 ||
      handshaker->http_response.status >= 300) {
    char* msg;
    gpr_asprintf(&msg, "HTTP proxy returned response code %d",
                 handshaker->http_response.status);
    error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
    gpr_free(msg);
    handshake_failed_locked(exec_ctx, handshaker, error);
    goto done;
  }
  // Success.  Invoke handshake-done callback.
  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
done:
  // Set shutdown to true so that subsequent calls to
  // http_connect_handshaker_shutdown() do nothing.
  handshaker->shutdown = true;
  gpr_mu_unlock(&handshaker->mu);
  http_connect_handshaker_unref(exec_ctx, handshaker);
}
Example #7
static void endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *secure_ep,
                           grpc_slice_buffer *slices, grpc_closure *cb) {
  GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0);

  unsigned i;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  uint8_t *cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
  uint8_t *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);

  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);

  if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
    for (i = 0; i < slices->count; i++) {
      char *data =
          grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  if (ep->zero_copy_protector != NULL) {
    // Use zero-copy grpc protector to protect.
    result = tsi_zero_copy_grpc_protector_protect(
        exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer);
  } else {
    // Use frame protector to protect.
    for (i = 0; i < slices->count; i++) {
      grpc_slice plain = slices->slices[i];
      uint8_t *message_bytes = GRPC_SLICE_START_PTR(plain);
      size_t message_size = GRPC_SLICE_LENGTH(plain);
      while (message_size > 0) {
        size_t protected_buffer_size_to_send = (size_t)(end - cur);
        size_t processed_message_size = message_size;
        gpr_mu_lock(&ep->protector_mu);
        result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &protected_buffer_size_to_send);
        gpr_mu_unlock(&ep->protector_mu);
        if (result != TSI_OK) {
          gpr_log(GPR_ERROR, "Encryption error: %s",
                  tsi_result_to_string(result));
          break;
        }
        message_bytes += processed_message_size;
        message_size -= processed_message_size;
        cur += protected_buffer_size_to_send;

        if (cur == end) {
          flush_write_staging_buffer(ep, &cur, &end);
        }
      }
      if (result != TSI_OK) break;
    }
    if (result == TSI_OK) {
      size_t still_pending_size;
      do {
        size_t protected_buffer_size_to_send = (size_t)(end - cur);
        gpr_mu_lock(&ep->protector_mu);
        result = tsi_frame_protector_protect_flush(
            ep->protector, cur, &protected_buffer_size_to_send,
            &still_pending_size);
        gpr_mu_unlock(&ep->protector_mu);
        if (result != TSI_OK) break;
        cur += protected_buffer_size_to_send;
        if (cur == end) {
          flush_write_staging_buffer(ep, &cur, &end);
        }
      } while (still_pending_size > 0);
      if (cur != GRPC_SLICE_START_PTR(ep->write_staging_buffer)) {
        grpc_slice_buffer_add(
            &ep->output_buffer,
            grpc_slice_split_head(
                &ep->write_staging_buffer,
                (size_t)(cur -
                         GRPC_SLICE_START_PTR(ep->write_staging_buffer))));
      }
    }
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
    GRPC_CLOSURE_SCHED(
        exec_ctx, cb,
        grpc_set_tsi_error_result(
            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
    GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
    return;
  }

  grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
  GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
}
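For the non-zero-copy path above, the frame protector is driven by a protect loop followed by a flush loop. The sketch below isolates that contract, without the slice buffers and staging-buffer flushing used in endpoint_write(); the function name, the flat output buffer, and the assumption that out_cap is always large enough are illustrative, not gRPC API.

#include <stdint.h>
#include "src/core/tsi/transport_security_interface.h" /* assumed include path */

static tsi_result protect_all(tsi_frame_protector *protector,
                              const uint8_t *plain, size_t plain_size,
                              uint8_t *out, size_t out_cap, size_t *out_size) {
  tsi_result result = TSI_OK;
  size_t written = 0;
  size_t still_pending = 0;

  /* Feed plaintext through the protector; each call may consume only part of
     the input and emit only part of a frame. */
  while (plain_size > 0) {
    size_t out_avail = out_cap - written; /* assumed never to reach 0 */
    size_t consumed = plain_size;
    result = tsi_frame_protector_protect(protector, plain, &consumed,
                                         out + written, &out_avail);
    if (result != TSI_OK) return result;
    plain += consumed;
    plain_size -= consumed;
    written += out_avail;
  }

  /* Drain whatever the protector is still buffering (e.g. a partial frame). */
  do {
    size_t out_avail = out_cap - written;
    result = tsi_frame_protector_protect_flush(protector, out + written,
                                               &out_avail, &still_pending);
    if (result != TSI_OK) return result;
    written += out_avail;
  } while (still_pending > 0);

  *out_size = written;
  return TSI_OK;
}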
Example #8
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
                    grpc_error *error) {
  unsigned i;
  uint8_t keep_looping = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  uint8_t *cur = GRPC_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);

  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                   "Secure read failed", &error, 1));
    return;
  }

  if (ep->zero_copy_protector != NULL) {
    // Use zero-copy grpc protector to unprotect.
    result = tsi_zero_copy_grpc_protector_unprotect(
        exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer);
  } else {
    // Use frame protector to unprotect.
    /* TODO(yangg) check error, maybe bail out early */
    for (i = 0; i < ep->source_buffer.count; i++) {
      grpc_slice encrypted = ep->source_buffer.slices[i];
      uint8_t *message_bytes = GRPC_SLICE_START_PTR(encrypted);
      size_t message_size = GRPC_SLICE_LENGTH(encrypted);

      while (message_size > 0 || keep_looping) {
        size_t unprotected_buffer_size_written = (size_t)(end - cur);
        size_t processed_message_size = message_size;
        gpr_mu_lock(&ep->protector_mu);
        result = tsi_frame_protector_unprotect(
            ep->protector, message_bytes, &processed_message_size, cur,
            &unprotected_buffer_size_written);
        gpr_mu_unlock(&ep->protector_mu);
        if (result != TSI_OK) {
          gpr_log(GPR_ERROR, "Decryption error: %s",
                  tsi_result_to_string(result));
          break;
        }
        message_bytes += processed_message_size;
        message_size -= processed_message_size;
        cur += unprotected_buffer_size_written;

        if (cur == end) {
          flush_read_staging_buffer(ep, &cur, &end);
          /* Force to enter the loop again to extract buffered bytes in
             protector. The bytes could be buffered because of running out of
             staging_buffer. If this happens at the end of all slices, doing
             another unprotect avoids leaving data in the protector. */
          keep_looping = 1;
        } else if (unprotected_buffer_size_written > 0) {
          keep_looping = 1;
        } else {
          keep_looping = 0;
        }
      }
      if (result != TSI_OK) break;
    }

    if (cur != GRPC_SLICE_START_PTR(ep->read_staging_buffer)) {
      grpc_slice_buffer_add(
          ep->read_buffer,
          grpc_slice_split_head(
              &ep->read_staging_buffer,
              (size_t)(cur - GRPC_SLICE_START_PTR(ep->read_staging_buffer))));
    }
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->source_buffer);

  if (result != TSI_OK) {
    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
    call_read_cb(
        exec_ctx, ep,
        grpc_set_tsi_error_result(
            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result));
    return;
  }

  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
Example #9
static void send_done(grpc_exec_ctx *exec_ctx, void *elemp, grpc_error *error) {
  grpc_call_element *elem = elemp;
  call_data *calld = elem->call_data;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &calld->slices);
  calld->post_send->cb(exec_ctx, calld->post_send->cb_arg, error);
}