/* Invoked when slice allocation for tcp->incoming_buffer completes; on
   failure the pending read is finished with the error, otherwise the
   actual recvmsg is attempted via tcp_do_read. */
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
                                     grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    tcp_do_read(exec_ctx, tcp);
  }
}
static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);
  if (!success) {
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(tcp, 0);
    TCP_UNREF(tcp, "read");
  } else {
    tcp_continue_read(tcp);
  }
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                            grpc_error *error) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    tcp_continue_read(exec_ctx, tcp);
  }
}
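/* In both grpc_error-based handlers above, the error arriving at the closure
   is borrowed: any path that forwards it (here, to call_read_cb) takes its
   own reference first via GRPC_ERROR_REF(error).  Below is a minimal sketch
   of that borrow-versus-own discipline using a hypothetical refcounted
   my_error type, not gRPC's actual grpc_error internals. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  int refs;
  const char *msg;
} my_error; /* hypothetical stand-in for grpc_error */

static my_error *error_create(const char *msg) {
  my_error *e = malloc(sizeof(*e));
  e->refs = 1;
  e->msg = msg;
  return e;
}

static my_error *error_ref(my_error *e) {
  if (e != NULL) ++e->refs;
  return e;
}

static void error_unref(my_error *e) {
  if (e != NULL && --e->refs == 0) free(e);
}

/* The read callback owns the reference it is handed and unrefs it. */
static void read_cb(my_error *err) {
  if (err != NULL) fprintf(stderr, "read failed: %s\n", err->msg);
  error_unref(err);
}

/* The handler's error is borrowed, so it refs before forwarding,
   mirroring call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error)). */
static void handle_read(my_error *borrowed) {
  read_cb(error_ref(borrowed));
}

int main(void) {
  my_error *e = error_create("connection reset");
  handle_read(e);  /* the handler takes its own ref... */
  error_unref(e);  /* ...so the caller's ref is still valid to release */
  return 0;
}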
static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
                    grpc_error *error) {
  unsigned i;
  uint8_t keep_looping = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  uint8_t *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  uint8_t *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  if (error != GRPC_ERROR_NONE) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING(
                                   "Secure read failed", &error, 1));
    return;
  }

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < ep->source_buffer.count; i++) {
    gpr_slice encrypted = ep->source_buffer.slices[i];
    uint8_t *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in
           protector. The bytes could be buffered because of running out of
           staging_buffer. If this happens at the end of all slices, doing
           another unprotect avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        ep->read_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  gpr_slice_buffer_reset_and_unref(&ep->source_buffer);

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(ep->read_buffer);
    call_read_cb(exec_ctx, ep,
                 grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Unwrap failed"),
                                           result));
    return;
  }
  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
}
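/* The loop above keeps iterating even after message_size hits zero whenever
   the staging buffer filled, because the frame protector may still hold
   buffered plaintext that only surfaces on the next unprotect call.  Below
   is a minimal, self-contained sketch of that drain pattern; fake_protector
   and fake_unprotect are illustrative stand-ins for the TSI frame protector,
   not real gRPC/TSI API. */
#include <stdio.h>
#include <string.h>

#define STAGING_SIZE 8

/* Stub "protector": buffers input internally and releases it as output,
   bounded by the space the caller offers (like unprotect's out params). */
typedef struct {
  unsigned char pending[64];
  size_t pending_len;
} fake_protector;

static void fake_unprotect(fake_protector *p, const unsigned char *in,
                           size_t *in_len, unsigned char *out,
                           size_t *out_len) {
  size_t take = *in_len;
  if (take > sizeof(p->pending) - p->pending_len) {
    take = sizeof(p->pending) - p->pending_len;
  }
  memcpy(p->pending + p->pending_len, in, take);
  p->pending_len += take;
  *in_len = take; /* report how much input was consumed */

  size_t give = p->pending_len < *out_len ? p->pending_len : *out_len;
  memcpy(out, p->pending, give);
  memmove(p->pending, p->pending + give, p->pending_len - give);
  p->pending_len -= give;
  *out_len = give; /* report how much output was produced */
}

int main(void) {
  fake_protector prot = {{0}, 0};
  const unsigned char msg[] = "hello, staging buffer drain";
  const unsigned char *bytes = msg;
  size_t remaining = sizeof(msg) - 1;
  unsigned char staging[STAGING_SIZE];
  unsigned char *cur = staging;
  unsigned char *end = staging + STAGING_SIZE;
  int keep_looping = 0;

  while (remaining > 0 || keep_looping) {
    size_t out_written = (size_t)(end - cur);
    size_t in_consumed = remaining;
    fake_unprotect(&prot, bytes, &in_consumed, cur, &out_written);
    bytes += in_consumed;
    remaining -= in_consumed;
    cur += out_written;
    if (cur == end) {
      /* "Flush" the staging buffer, then loop again even if remaining == 0:
         the protector may still hold bytes, exactly as in on_read above. */
      fwrite(staging, 1, STAGING_SIZE, stdout);
      cur = staging;
      keep_looping = 1;
    } else {
      keep_looping = out_written > 0;
    }
  }
  fwrite(staging, 1, (size_t)(cur - staging), stdout);
  putchar('\n');
  return 0;
}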
static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(!tcp->finished_edge);
  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  GPR_TIMER_BEGIN("tcp_continue_read", 0);

  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GPR_TIMER_BEGIN("recvmsg", 0);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GPR_TIMER_END("recvmsg", read_bytes >= 0);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      /* We've consumed the edge, request a new one */
      grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
    } else {
      grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
      call_read_cb(exec_ctx, tcp,
                   tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
      TCP_UNREF(exec_ctx, tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(exec_ctx, tcp,
                 tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
      grpc_slice_buffer_trim_end(
          tcp->incoming_buffer,
          tcp->incoming_buffer->length - (size_t)read_bytes,
          &tcp->last_read_buffer);
    } else if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE);
    TCP_UNREF(exec_ctx, tcp, "read");
  }

  GPR_TIMER_END("tcp_continue_read", 0);
}
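/* Two idioms in tcp_do_read are worth isolating: restarting recvmsg when a
   signal interrupts it, and the additive-increase/multiplicative-decrease
   tuning of the iovec count (halved when the socket would block, bumped by
   one when a read fills the entire vector).  A reduced sketch under those
   assumptions; recvmsg_retry and adjust_iov_size are illustrative names,
   not gRPC API. */
#include <errno.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

#define MAX_READ_IOVEC 4

/* Restart recvmsg on EINTR, as the do/while loop above does. */
ssize_t recvmsg_retry(int fd, struct msghdr *msg) {
  ssize_t n;
  do {
    n = recvmsg(fd, msg, 0);
  } while (n < 0 && errno == EINTR);
  return n;
}

/* Shrink fast on EAGAIN, grow slowly when the read kept the buffer full. */
void adjust_iov_size(size_t *iov_size, int would_block, int filled_buffer) {
  if (would_block) {
    if (*iov_size > 1) *iov_size /= 2;
  } else if (filled_buffer && *iov_size < MAX_READ_IOVEC) {
    ++*iov_size;
  }
}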
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status error) {
  unsigned i;
  gpr_uint8 keep_looping = 0;
  size_t input_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < nslices; i++) {
    gpr_slice encrypted = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in
           protector. The bytes could be buffered because of running out of
           staging_buffer. If this happens at the end of all slices, doing
           another unprotect avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        &ep->input_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
    call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
    return;
  }
  /* The upper level will unref the slices. */
  input_buffer_count = ep->input_buffer.count;
  ep->input_buffer.count = 0;
  call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
}
static void tcp_continue_read(grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(!tcp->finished_edge);
  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);

  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
                                 gpr_slice_malloc(tcp->slice_size));
  }
  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      /* We've consumed the edge, request a new one */
      grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
    } else {
      /* TODO(klempner): Log interesting errors */
      gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
      call_read_cb(tcp, 0);
      TCP_UNREF(tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(tcp, 0);
    TCP_UNREF(tcp, "read");
  } else {
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
      gpr_slice_buffer_trim_end(tcp->incoming_buffer,
                                tcp->incoming_buffer->length - read_bytes);
    } else if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(tcp, 1);
    TCP_UNREF(tcp, "read");
  }

  GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
}
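/* Both tcp_continue_read and tcp_do_read treat EAGAIN as "the edge is
   consumed" and re-arm with grpc_fd_notify_on_read.  The contract comes from
   edge-triggered readiness: a transition is reported once, so the reader must
   drain to EAGAIN before waiting again.  A minimal sketch of that contract
   with raw epoll follows; it assumes a nonblocking fd registered with
   EPOLLIN | EPOLLET | EPOLLONESHOT, and is an analogy to, not a copy of,
   gRPC's poller machinery. */
#include <errno.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Read until EAGAIN (the edge is consumed), then re-arm the fd so the next
   readiness transition wakes us, mirroring grpc_fd_notify_on_read. */
void drain_then_rearm(int epfd, int fd) {
  char buf[4096];
  for (;;) {
    ssize_t n = read(fd, buf, sizeof(buf));
    if (n > 0) continue; /* consume data here */
    if (n < 0 && errno == EINTR) continue;
    if (n < 0 && errno == EAGAIN) {
      struct epoll_event ev;
      ev.events = EPOLLIN | EPOLLET | EPOLLONESHOT;
      ev.data.fd = fd;
      epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev); /* request a new edge */
      return;
    }
    /* n == 0 is EOF; other errors are fatal: the caller tears down, as the
       "Socket closed" / GRPC_OS_ERROR paths do above. */
    return;
  }
}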