static void tcp_handle_write(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  grpc_endpoint_op_status status;
  grpc_iomgr_closure *cb;

  if (!success) {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb->cb(cb->cb_arg, 0);
    TCP_UNREF(tcp, "write");
    return;
  }

  GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
  status = tcp_flush(tcp);
  if (status == GRPC_ENDPOINT_PENDING) {
    grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE);
    TCP_UNREF(tcp, "write");
  }
  GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
}
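/* The handlers in this file lean on a pair of refcounting macros: every
   pending read or write holds a reference on the grpc_tcp object, and the
   last unref frees it. A minimal sketch of what they are assumed to expand
   to follows (release build shown; debug builds additionally log the reason,
   file, and line, and later revisions thread a grpc_exec_ctx through
   tcp_unref as well). This is a reconstruction for readability, not verbatim
   gRPC source. */
#define TCP_REF(tcp, reason) tcp_ref((tcp))
#define TCP_UNREF(tcp, reason) tcp_unref((tcp))

static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }

static void tcp_unref(grpc_tcp *tcp) {
  /* gpr_unref returns nonzero once the count reaches zero */
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(tcp); /* close the fd/socket and free the endpoint */
  }
}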
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                             bool success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  flush_result status;
  grpc_closure *cb;

  if (!success) {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    cb->cb(exec_ctx, cb->cb_arg, 0);
    TCP_UNREF(exec_ctx, tcp, "write");
    return;
  }

  status = tcp_flush(tcp);
  if (status == FLUSH_PENDING) {
    grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    GPR_TIMER_BEGIN("tcp_handle_write.cb", 0);
    cb->cb(exec_ctx, cb->cb_arg, status == FLUSH_DONE);
    GPR_TIMER_END("tcp_handle_write.cb", 0);
    TCP_UNREF(exec_ctx, tcp, "write");
  }
}
static void tcp_handle_write(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                             grpc_error *error) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  grpc_closure *cb;

  if (error != GRPC_ERROR_NONE) {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    /* the incoming error is borrowed from the caller; take a ref before
       handing it to the callback */
    cb->cb(exec_ctx, cb->cb_arg, GRPC_ERROR_REF(error));
    TCP_UNREF(exec_ctx, tcp, "write");
    return;
  }

  if (!tcp_flush(tcp, &error)) {
    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "write: delayed");
    }
    grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
  } else {
    cb = tcp->write_cb;
    tcp->write_cb = NULL;
    if (grpc_tcp_trace) {
      const char *str = grpc_error_string(error);
      gpr_log(GPR_DEBUG, "write: %s", str);
      grpc_error_free_string(str);
    }
    grpc_closure_run(exec_ctx, cb, error);
    TCP_UNREF(exec_ctx, tcp, "write");
  }
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(void *tcpp, int from_iocp) {
  grpc_tcp *tcp = (grpc_tcp *)tcpp;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->write_info;
  grpc_iomgr_closure *cb;
  int success;
  int do_abort = 0;

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  if (!from_iocp || tcp->shutting_down) {
    /* If we are here with from_iocp set to true, it means we got raced to
       shutting down the endpoint. No actual abort callback will happen
       though, so we're going to do it from here. */
    do_abort = 1;
  }
  gpr_mu_unlock(&tcp->mu);

  if (do_abort) {
    if (from_iocp) {
      tcp->socket->write_info.outstanding = 0;
    }
    TCP_UNREF(tcp, "write");
    if (cb) {
      cb->cb(cb->cb_arg, 0);
    }
    return;
  }

  GPR_ASSERT(tcp->socket->write_info.outstanding);

  if (info->wsa_error != 0) {
    if (info->wsa_error != WSAECONNRESET) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
      gpr_free(utf8_message);
    }
    success = 0;
  } else {
    GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length);
    success = 1;
  }

  tcp->socket->write_info.outstanding = 0;

  TCP_UNREF(tcp, "write");
  cb->cb(cb->cb_arg, success);
}
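/* Hedged sketch of the per-direction bookkeeping the Windows snippets assume.
   Field names follow their uses in these functions (note the historical
   spelling "bytes_transfered"); the real struct in grpc's socket_windows.h
   also carries the notification callback registered via
   grpc_socket_notify_on_read/write, and its exact layout may differ. */
typedef struct grpc_winsocket_callback_info {
  OVERLAPPED overlapped;  /* passed to WSASend/WSARecv for overlapped I/O */
  int wsa_error;          /* WSAGetLastError() captured at completion */
  DWORD bytes_transfered; /* bytes moved by the completed operation */
  int outstanding;        /* pre-exec_ctx era: is an operation in flight? */
} grpc_winsocket_callback_info;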
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  grpc_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  GRPC_ERROR_REF(error);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
      gpr_free(utf8_message);
      grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        grpc_slice_buffer_add(tcp->read_slices, sub);
      } else {
        grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
        error = tcp->shutting_down
                    ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                          "TCP stream shutting down", &tcp->shutdown_error, 1)
                    : GRPC_ERROR_CREATE_FROM_STATIC_STRING("End of TCP stream");
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(exec_ctx, tcp, "read");
  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
static grpc_endpoint_op_status win_read(grpc_endpoint *ep,
                                        gpr_slice_buffer *read_slices,
                                        grpc_iomgr_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  GPR_ASSERT(!tcp->socket->read_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_ERROR;
  }

  TCP_REF(tcp, "read");
  tcp->socket->read_info.outstanding = 1;
  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  gpr_slice_buffer_reset_and_unref(read_slices);

  tcp->read_slice = gpr_slice_malloc(8192);

  buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
  buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately ? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    int ok;
    info->bytes_transfered = bytes_read;
    ok = on_read(tcp, 1);
    TCP_UNREF(tcp, "read");
    return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      int ok;
      info->wsa_error = wsa_error;
      ok = on_read(tcp, 1);
      return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR;
    }
  }

  grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp);
  return GRPC_ENDPOINT_PENDING;
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  gpr_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  GRPC_ERROR_REF(error);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      error = GRPC_ERROR_CREATE(utf8_message);
      gpr_free(utf8_message);
      gpr_slice_unref(tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        gpr_slice_buffer_add(tcp->read_slices, sub);
      } else {
        gpr_slice_unref(tcp->read_slice);
        error = GRPC_ERROR_CREATE("End of TCP stream");
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(tcp, "read");
  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
  grpc_tcp *tcp = (grpc_tcp *)tcpp;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->write_info;
  grpc_closure *cb;

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  gpr_mu_unlock(&tcp->mu);

  if (success) {
    if (info->wsa_error != 0) {
      if (info->wsa_error != WSAECONNRESET) {
        char *utf8_message = gpr_format_message(info->wsa_error);
        gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message);
        gpr_free(utf8_message);
      }
      success = 0;
    } else {
      GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length);
    }
  }

  TCP_UNREF(tcp, "write");
  cb->cb(exec_ctx, cb->cb_arg, success);
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, bool success) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  gpr_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  if (success) {
    if (socket->read_info.wsa_error != 0 && !tcp->shutting_down) {
      if (socket->read_info.wsa_error != WSAECONNRESET) {
        char *utf8_message = gpr_format_message(info->wsa_error);
        gpr_log(GPR_ERROR, "ReadFile overlapped error: %s", utf8_message);
        gpr_free(utf8_message);
      }
      success = 0;
      gpr_slice_unref(tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        gpr_slice_buffer_add(tcp->read_slices, sub);
        success = 1;
      } else {
        gpr_slice_unref(tcp->read_slice);
        success = 0;
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(tcp, "read");
  if (cb) {
    cb->cb(exec_ctx, cb->cb_arg, success);
  }
}
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                     int *fd, grpc_closure *done) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
static void on_read_cb(void *tcpp, int from_iocp) {
  grpc_tcp *tcp = tcpp;
  grpc_iomgr_closure *cb = tcp->read_cb;
  int success = on_read(tcp, from_iocp);
  tcp->read_cb = NULL;
  TCP_UNREF(tcp, "read");
  if (cb) {
    cb->cb(cb->cb_arg, success);
  }
}
void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                     int *fd, grpc_closure *done) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp *tcp = (grpc_tcp *)ep;
  GPR_ASSERT(ep->vtable == &vtable);
  tcp->release_fd = fd;
  tcp->release_fd_cb = done;
  grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);

  if (!success) {
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(tcp, 0);
    TCP_UNREF(tcp, "read");
  } else {
    tcp_continue_read(tcp);
  }
}
static void tcp_read_allocation_done(grpc_exec_ctx *exec_ctx, void *tcpp,
                                     grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    tcp_do_read(exec_ctx, tcp);
  }
}
static void tcp_handle_read(grpc_exec_ctx *exec_ctx, void *arg /* grpc_tcp */,
                            grpc_error *error) {
  grpc_tcp *tcp = (grpc_tcp *)arg;
  GPR_ASSERT(!tcp->finished_edge);

  if (error != GRPC_ERROR_NONE) {
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    tcp_continue_read(exec_ctx, tcp);
  }
}
static void read_callback(uv_stream_t *stream, ssize_t nread,
                          const uv_buf_t *buf) {
  grpc_slice sub;
  grpc_error *error;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_tcp *tcp = stream->data;
  grpc_closure *cb = tcp->read_cb;
  if (nread == 0) {
    // Nothing happened. Wait for the next callback
    return;
  }
  TCP_UNREF(&exec_ctx, tcp, "read");
  tcp->read_cb = NULL;
  // TODO(murgatroid99): figure out what the return value here means
  uv_read_stop(stream);
  if (nread == UV_EOF) {
    error = GRPC_ERROR_CREATE("EOF");
  } else if (nread > 0) {
    // Successful read
    sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, (size_t)nread);
    grpc_slice_buffer_add(tcp->read_slices, sub);
    error = GRPC_ERROR_NONE;
    if (grpc_tcp_trace) {
      size_t i;
      const char *str = grpc_error_string(error);
      gpr_log(GPR_DEBUG, "read: error=%s", str);
      for (i = 0; i < tcp->read_slices->count; i++) {
        char *dump = grpc_dump_slice(tcp->read_slices->slices[i],
                                     GPR_DUMP_HEX | GPR_DUMP_ASCII);
        gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
                dump);
        gpr_free(dump);
      }
    }
  } else {
    // nread < 0: Error
    error = GRPC_ERROR_CREATE("TCP Read failed");
  }
  grpc_closure_sched(&exec_ctx, cb, error);
  grpc_exec_ctx_finish(&exec_ctx);
}
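/* Hedged sketch of the read-initiation side that pairs with read_callback:
   libuv asks for a buffer via an allocation callback, which is pointed at a
   fresh slice stored in tcp->read_slice. The names alloc_uv_buf and
   uv_endpoint_read (and the 8192-byte slice size) are illustrative
   assumptions, not grpc's actual identifiers. */
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
                         uv_buf_t *buf) {
  grpc_tcp *tcp = handle->data;
  (void)suggested_size;
  tcp->read_slice = grpc_slice_malloc(8192);
  buf->base = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);
  buf->len = GRPC_SLICE_LENGTH(tcp->read_slice);
}

static void uv_endpoint_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                             grpc_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref(read_slices);
  TCP_REF(tcp, "read"); /* balanced by the TCP_UNREF in read_callback */
  uv_read_start((uv_stream_t *)tcp->handle, alloc_uv_buf, read_callback);
}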
static void write_callback(uv_write_t *req, int status) {
  grpc_tcp *tcp = req->data;
  grpc_error *error;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure *cb = tcp->write_cb;
  tcp->write_cb = NULL;
  TCP_UNREF(&exec_ctx, tcp, "write");
  if (status == 0) {
    error = GRPC_ERROR_NONE;
  } else {
    error = GRPC_ERROR_CREATE("TCP Write failed");
  }
  if (grpc_tcp_trace) {
    const char *str = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
  }
  gpr_free(tcp->write_buffers);
  grpc_resource_user_free(&exec_ctx, tcp->resource_user,
                          sizeof(uv_buf_t) * tcp->write_slices->count);
  grpc_closure_sched(&exec_ctx, cb, error);
  grpc_exec_ctx_finish(&exec_ctx);
}
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_write(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = (grpc_tcp *)tcpp;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->write_info;
  grpc_closure *cb;

  GRPC_ERROR_REF(error);

  gpr_mu_lock(&tcp->mu);
  cb = tcp->write_cb;
  tcp->write_cb = NULL;
  gpr_mu_unlock(&tcp->mu);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0) {
      error = GRPC_WSA_ERROR(info->wsa_error, "WSASend");
    } else {
      GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length);
    }
  }

  TCP_UNREF(exec_ctx, tcp, "write");
  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
/* Initiates a write. */
static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
                                         gpr_slice_buffer *slices,
                                         grpc_iomgr_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;

  GPR_ASSERT(!tcp->socket->write_info.outstanding);
  if (tcp->shutting_down) {
    return GRPC_ENDPOINT_ERROR;
  }

  TCP_REF(tcp, "write");
  tcp->socket->write_info.outstanding = 1;
  tcp->write_cb = cb;
  tcp->write_slices = slices;

  if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices->count; i++) {
    buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
    buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, tcp->write_slices->count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR;
    if (status == 0) {
      ret = GRPC_ENDPOINT_DONE;
      GPR_ASSERT(bytes_sent == tcp->write_slices->length);
    } else {
      /* only log unexpected errors; the send error lives in the write-side
         info, and connection resets are common enough to stay quiet about */
      if (info->wsa_error != WSAECONNRESET) {
        char *utf8_message = gpr_format_message(info->wsa_error);
        gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
        gpr_free(utf8_message);
      }
    }
    if (allocated) gpr_free(allocated);
    tcp->socket->write_info.outstanding = 0;
    TCP_UNREF(tcp, "write");
    return ret;
  }

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, tcp->write_slices->count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      tcp->socket->write_info.outstanding = 0;
      TCP_UNREF(tcp, "write");
      return GRPC_ENDPOINT_ERROR;
    }
  }

  /* As all is now setup, we can now ask for the IOCP notification. It may
     trigger the callback immediately however, but no matter. */
  grpc_socket_notify_on_write(socket, on_write, tcp);
  return GRPC_ENDPOINT_PENDING;
}
static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp *tcp = (grpc_tcp *)ep;
  uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
static void win_destroy(grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  TCP_UNREF(tcp, "destroy");
}
static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
/* Initiates a write. */
static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                      grpc_slice_buffer *slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *socket = tcp->socket;
  grpc_winsocket_callback_info *info = &socket->write_info;
  unsigned i;
  DWORD bytes_sent;
  int status;
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;
  size_t len;

  if (tcp->shutting_down) {
    GRPC_CLOSURE_SCHED(exec_ctx, cb,
                       GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "TCP socket is shutting down", &tcp->shutdown_error,
                           1));
    return;
  }

  tcp->write_cb = cb;
  tcp->write_slices = slices;
  GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
  if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
    buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
    allocated = buffers;
  }

  for (i = 0; i < tcp->write_slices->count; i++) {
    len = GRPC_SLICE_LENGTH(tcp->write_slices->slices[i]);
    GPR_ASSERT(len <= ULONG_MAX);
    buffers[i].len = (ULONG)len;
    buffers[i].buf = (char *)GRPC_SLICE_START_PTR(tcp->write_slices->slices[i]);
  }

  /* First, let's try a synchronous, non-blocking write. */
  status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
                   &bytes_sent, 0, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
     connection that has its send queue filled up. But if we don't, then we can
     avoid doing an async write operation at all. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    grpc_error *error = status == 0
                            ? GRPC_ERROR_NONE
                            : GRPC_WSA_ERROR(info->wsa_error, "WSASend");
    GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
    if (allocated) gpr_free(allocated);
    return;
  }

  TCP_REF(tcp, "write");

  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
     operation, this time asynchronously. */
  memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
                   &bytes_sent, 0, &socket->write_info.overlapped, NULL);
  if (allocated) gpr_free(allocated);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      TCP_UNREF(exec_ctx, tcp, "write");
      GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
      return;
    }
  }

  /* As all is now setup, we can now ask for the IOCP notification. It may
     trigger the callback immediately however, but no matter. */
  grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
}
static void tcp_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
static void tcp_do_read(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(!tcp->finished_edge);
  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  GPR_TIMER_BEGIN("tcp_continue_read", 0);

  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GRPC_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GRPC_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GPR_TIMER_BEGIN("recvmsg", 0);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GPR_TIMER_END("recvmsg", read_bytes >= 0);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      /* We've consumed the edge, request a new one */
      grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
    } else {
      grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
      call_read_cb(exec_ctx, tcp,
                   tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
      TCP_UNREF(exec_ctx, tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(exec_ctx, tcp,
                 tcp_annotate_error(GRPC_ERROR_CREATE("Socket closed"), tcp));
    TCP_UNREF(exec_ctx, tcp, "read");
  } else {
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
      grpc_slice_buffer_trim_end(
          tcp->incoming_buffer,
          tcp->incoming_buffer->length - (size_t)read_bytes,
          &tcp->last_read_buffer);
    } else if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE);
    TCP_UNREF(exec_ctx, tcp, "read");
  }

  GPR_TIMER_END("tcp_continue_read", 0);
}
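/* Hedged sketch of the tcp_annotate_error helper used above: it tags the
   error with the file descriptor and peer address so read failures are
   attributable to a connection. grpc_error_set_int/grpc_error_set_str and
   the two keys exist in the grpc_error API of this era, but this exact
   composition is an assumption. */
static grpc_error *tcp_annotate_error(grpc_error *src_error, grpc_tcp *tcp) {
  return grpc_error_set_str(
      grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
      GRPC_ERROR_STR_TARGET_ADDRESS, tcp->peer_string);
}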
static void win_unref_closure(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_error *error) {
  TCP_UNREF(arg, "resource_user");
}
static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp *tcp = (grpc_tcp *)ep;
  win_maybe_shutdown_resource_user(exec_ctx, tcp);
  TCP_UNREF(tcp, "destroy");
}
static void win_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  grpc_network_status_unregister_endpoint(ep);
  grpc_tcp *tcp = (grpc_tcp *)ep;
  TCP_UNREF(exec_ctx, tcp, "destroy");
}
static void tcp_continue_read(grpc_tcp *tcp) {
  struct msghdr msg;
  struct iovec iov[MAX_READ_IOVEC];
  ssize_t read_bytes;
  size_t i;

  GPR_ASSERT(!tcp->finished_edge);
  GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
  GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);

  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
                                 gpr_slice_malloc(tcp->slice_size));
  }
  for (i = 0; i < tcp->incoming_buffer->count; i++) {
    iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
    iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }

  msg.msg_name = NULL;
  msg.msg_namelen = 0;
  msg.msg_iov = iov;
  msg.msg_iovlen = tcp->iov_size;
  msg.msg_control = NULL;
  msg.msg_controllen = 0;
  msg.msg_flags = 0;

  GRPC_TIMER_BEGIN(GRPC_PTAG_RECVMSG, 0);
  do {
    read_bytes = recvmsg(tcp->fd, &msg, 0);
  } while (read_bytes < 0 && errno == EINTR);
  GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);

  if (read_bytes < 0) {
    /* NB: After calling call_read_cb a parallel call of the read handler may
     * be running. */
    if (errno == EAGAIN) {
      if (tcp->iov_size > 1) {
        tcp->iov_size /= 2;
      }
      /* We've consumed the edge, request a new one */
      grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
    } else {
      /* TODO(klempner): Log interesting errors */
      gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
      call_read_cb(tcp, 0);
      TCP_UNREF(tcp, "read");
    }
  } else if (read_bytes == 0) {
    /* 0 read size ==> end of stream */
    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
    call_read_cb(tcp, 0);
    TCP_UNREF(tcp, "read");
  } else {
    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
      gpr_slice_buffer_trim_end(tcp->incoming_buffer,
                                tcp->incoming_buffer->length - read_bytes);
    } else if (tcp->iov_size < MAX_READ_IOVEC) {
      ++tcp->iov_size;
    }
    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
    call_read_cb(tcp, 1);
    TCP_UNREF(tcp, "read");
  }

  GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
}
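/* Hedged sketch of call_read_cb for the int-success variants above: it hands
   the filled incoming_buffer to the pending read closure and clears the
   per-read state (the grpc_error-era variants differ only in the callback
   payload). Trace logging is omitted; this is a reconstruction, not verbatim
   grpc source. */
static void call_read_cb(grpc_tcp *tcp, int success) {
  grpc_iomgr_closure *cb = tcp->read_cb;
  tcp->read_cb = NULL;
  tcp->incoming_buffer = NULL; /* the data now belongs to the reader */
  cb->cb(cb->cb_arg, success);
}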