/**
 * Flush the write buffer by scheduling a uv_write() of the pending bytes.
 *
 * No-op when nothing is buffered or when a flush is already in flight;
 * write_cb clears the FLUSHING flag and releases the socket reference
 * when the write completes.
 */
void lcb_luv_flush(lcb_luv_socket_t sock)
{
    int status;
    struct lcb_luv_evstate_st *evstate;

    if (sock->write.nb == 0) {
        return;
    }

    evstate = EVSTATE_FIND(sock, WRITE);
    if (EVSTATE_IS(evstate, FLUSHING)) {
        log_write_info("Not flushing because we are in the middle of a flush");
        return;
    }

    sock->write.buf.base = sock->write.data;
    sock->write.buf.len = sock->write.nb;

    log_write_debug("Will flush");
    status = uv_write(&sock->u_req.write,
                      (uv_stream_t *)&sock->tcp,
                      &sock->write.buf, 1, write_cb);

    if (status) {
        /* uv_write failed synchronously: write_cb will never be invoked, so
         * taking a reference or setting FLUSHING here would leak the ref and
         * block all future flushes forever. Record the error so the next
         * write_common() call reports it to the caller. */
        evstate->err = lcb_luv_errno_map((uv_last_error(sock->parent->loop)).code);
        return;
    }

    /* write_cb owns this reference and drops it on completion */
    lcb_luv_socket_ref(sock);
    evstate->flags |= LCB_LUV_EVf_FLUSHING;
}
/**
 * Deliver an asynchronous 'write-ready' notification to libcouchbase,
 * invoking the normal callback chains.
 *
 * This is used primarily for write-readiness emulation (letting
 * libcouchbase copy data into our socket buffer).
 *
 * When called from inside the async callback itself we must guess what
 * lcb will do next. For a plain write event the buffer eventually fills
 * up and the redo flag is simply not set. Connect-readiness is different:
 * it is reported as a write event but copies no data, so unconditionally
 * re-arming could recurse forever. Connect-'readiness' is also a real
 * event in uv, so the emulation is unnecessary for it anyway.
 */
void lcb_luv_send_async_write_ready(lcb_luv_socket_t sock)
{
    if (sock->async_entered) {
        /* Re-entrant request: apply extra guards so we never spin in a
         * busy loop re-scheduling ourselves. */
        struct lcb_luv_evstate_st *write_ev = EVSTATE_FIND(sock, WRITE);
        struct lcb_luv_evstate_st *conn_ev = EVSTATE_FIND(sock, CONNECT);

        if (!EVSTATE_IS(conn_ev, CONNECTED)) {
            log_loop_debug("Not iterating again for phony write event");
            return;
        }
        if (EVSTATE_IS(write_ev, FLUSHING)) {
            log_loop_debug("Not requesting second iteration. "
                           "Already inside a flush");
            return;
        }
        if (sock->write.nb >= sizeof(sock->write.data)) {
            log_loop_debug("Not enough space to write..");
            return;
        }

        sock->async_redo = 1;
        return;
    }

    if (sock->async_active) {
        log_loop_trace("prep_active is true");
        return;
    }

    log_loop_debug("Will try and schedule prepare callback for %d", sock->idx);

    /* Reference is held until the async callback runs */
    lcb_luv_socket_ref(sock);
    uv_async_send(&sock->async);
    sock->async_active = 1;
}
/**
 * Copy user data into the socket's internal write buffer, emulating a
 * nonblocking send.
 *
 * Returns the number of bytes buffered (possibly fewer than len), or -1
 * with *errno_out set: a previously recorded socket error (consumed and
 * cleared here), or EWOULDBLOCK when a flush is in progress or the
 * buffer is full.
 */
static lcb_ssize_t write_common(lcb_luv_socket_t sock, const void *buf, size_t len, int *errno_out)
{
    lcb_ssize_t ret;
    struct lcb_luv_evstate_st *evstate = EVSTATE_FIND(sock, WRITE);

    /* len is size_t; the old "%d" specifier was a printf format mismatch (UB).
     * Cast to unsigned long for portability through the logging macro. */
    log_write_debug("%d: Requested to write %lu bytes from %p",
                    sock->idx, (unsigned long)len, buf);

    if (evstate->err) {
        /* Report (and clear) an error recorded by an earlier flush */
        log_write_warn("Socket has pending error %d", evstate->err);
        *errno_out = evstate->err;
        evstate->err = 0;
        return -1;
    }

    if (EVSTATE_IS(evstate, FLUSHING)) {
        log_write_info("Will not write because we are inside a flush");
        *errno_out = EWOULDBLOCK;
        return -1;
    }

    ret = MINIMUM(len, sizeof(sock->write.data) - sock->write.nb);
    if (ret == 0) {
        log_write_info("We have no more space inside the buffer");
        *errno_out = EWOULDBLOCK;
        return -1;
    }

    /* NOTE(review): free space is computed from 'nb' but the copy lands at
     * offset 'pos'; this assumes pos == nb between flushes (i.e. write_cb
     * resets both) — verify against write_cb to rule out overflow. */
    memcpy(sock->write.data + sock->write.pos, buf, ret);

    sock->write.pos += ret;
    sock->write.nb += ret;

    log_write_trace("Returning %ld", (long)ret);
    return ret;
}