/**
 * Copy `nbytes` from `src` into `dst` without consuming `src`.
 *
 * The read is performed against a stack copy of src's bookkeeping, so
 * src's read cursor is left untouched; dst is grown as needed and its
 * write cursor advanced by `nbytes`.
 *
 * @param dst destination ring buffer (grown if necessary)
 * @param src source ring buffer (not modified)
 * @param nbytes number of bytes to copy
 * @return 0 on success, -1 if src holds fewer than nbytes bytes or if
 *         dst could not be grown
 */
int ringbuffer_memcpy(ringbuffer_t *dst, ringbuffer_t *src, lcb_size_t nbytes)
{
    ringbuffer_t copy = *src;
    struct lcb_iovec_st iov[2];
    int ii = 0;
    lcb_size_t towrite = nbytes;
    lcb_size_t toread, nb;

    if (nbytes > ringbuffer_get_nbytes(src)) {
        /* EINVAL: caller asked for more data than src contains */
        return -1;
    }

    if (!ringbuffer_ensure_capacity(dst, nbytes)) {
        /* Failed to allocate space */
        return -1;
    }

    ringbuffer_get_iov(dst, RINGBUFFER_WRITE, iov);

    do {
        assert(ii < 2);
        /* Recompute the per-segment size each pass. The previous code
         * computed `toread` once from iov[0] and decremented it, so a
         * fully-consumed first segment left toread == 0 for the second
         * iovec, making the second read a no-op and tripping the
         * assert. (Also fixes "&copy" that had been corrupted into the
         * copyright glyph.) */
        toread = minimum(iov[ii].iov_len, towrite);
        nb = ringbuffer_read(&copy, iov[ii].iov_base, toread);
        towrite -= nb;
        ++ii;
    } while (towrite > 0);

    ringbuffer_produced(dst, nbytes);
    return 0;
}
/**
 * Read pending socket data into the server's input ring buffer.
 *
 * @param c the server whose socket is read
 * @return 1 if data was consumed (or the read was merely interrupted),
 *         0 if the socket would block, -1 on a fatal error (the server
 *         is failed out, or ENOMEM was reported to the error handler)
 */
static int do_fill_input_buffer(libcouchbase_server_t *c)
{
    struct libcouchbase_iovec_st chunks[2];
    libcouchbase_ssize_t got;

    if (!ringbuffer_ensure_capacity(&c->input, 8192)) {
        libcouchbase_error_handler(c->instance, LIBCOUCHBASE_CLIENT_ENOMEM, NULL);
        return -1;
    }

    ringbuffer_get_iov(&c->input, RINGBUFFER_WRITE, chunks);
    got = c->instance->io->recvv(c->instance->io, c->sock, chunks, 2);

    if (got > 0) {
        ringbuffer_produced(&c->input, (libcouchbase_size_t)got);
        return 1;
    }

    if (got == 0) {
        /* Peer closed the connection; the iov must have had room, or we
         * could not distinguish EOF from a zero-length read */
        assert((chunks[0].iov_len + chunks[1].iov_len) != 0);
        /* TODO stash error message somewhere
         * "Connection closed... we should resend to other nodes or reconnect!!" */
        libcouchbase_failout_server(c, LIBCOUCHBASE_NETWORK_ERROR);
        return -1;
    }

    /* got == -1: inspect the plugin's errno */
    if (c->instance->io->error == EINTR) {
        return 1; /* interrupted; caller will come back around */
    }
    if (c->instance->io->error == EWOULDBLOCK) {
        return 0;
    }
    libcouchbase_failout_server(c, LIBCOUCHBASE_NETWORK_ERROR);
    return -1;
}
/**
 * Flush the server's output ring buffer to its socket.
 *
 * Loops until the buffer is drained, retrying on EINTR.
 *
 * @param c the server whose output is flushed
 * @return 0 when the buffer is drained or the socket would block,
 *         -1 on a fatal network error (the server is failed out)
 */
static int do_send_data(libcouchbase_server_t *c)
{
    for (;;) {
        struct libcouchbase_iovec_st chunks[2];
        libcouchbase_ssize_t sent;

        ringbuffer_get_iov(&c->output, RINGBUFFER_READ, chunks);
        sent = c->instance->io->sendv(c->instance->io, c->sock, chunks, 2);

        if (sent == -1) {
            if (c->instance->io->error == EINTR) {
                /* interrupted; retry immediately */
            } else if (c->instance->io->error == EWOULDBLOCK) {
                return 0;
            } else {
                libcouchbase_failout_server(c, LIBCOUCHBASE_NETWORK_ERROR);
                return -1;
            }
        } else {
            ringbuffer_consumed(&c->output, (libcouchbase_size_t)sent);
        }

        /* Mirrors the original do/while condition: stop once drained */
        if (c->output.nbytes == 0) {
            return 0;
        }
    }
}
/**
 * Synchronously drain `buf` to the connection's socket (v0 model).
 *
 * @param conn connection whose socket is written
 * @param buf ring buffer holding the bytes to send; consumed as sent
 * @return LCB_SOCKRW_WROTE when the buffer is fully drained,
 *         LCB_SOCKRW_WOULDBLOCK if the socket cannot take more now,
 *         LCB_SOCKRW_IO_ERROR on any other socket error
 */
lcb_sockrw_status_t lcb_sockrw_v0_write(lcb_connection_t conn, ringbuffer_t *buf)
{
    lcb_io_opt_t io = conn->io;

    while (buf->nbytes > 0) {
        struct lcb_iovec_st segments[2];
        lcb_ssize_t sent;

        ringbuffer_get_iov(buf, RINGBUFFER_READ, segments);
        sent = io->v.v0.sendv(io, conn->sockfd, segments, 2);

        if (sent > 0) {
            ringbuffer_consumed(buf, (lcb_size_t)sent);
            continue;
        }
        if (sent == 0) {
            continue; /* nothing written this pass; try again */
        }

        /* sent == -1: classify the plugin's errno */
        switch (io->v.v0.error) {
        case EINTR:
            break; /* interrupted; retry */
        case EWOULDBLOCK:
#ifdef USE_EAGAIN
        case EAGAIN:
#endif
            return LCB_SOCKRW_WOULDBLOCK;
        default:
            return LCB_SOCKRW_IO_ERROR;
        }
    }
    return LCB_SOCKRW_WROTE;
}
/**
 * Schedule pending I/O for a completion-model (v1) context: flush the
 * queued output buffer, notify a flush-ready waiter, and arm a read if
 * one was requested and none is already in flight.
 */
static void C_schedule(lcbio_CTX *ctx)
{
    lcbio_TABLE *io = ctx->io;
    lcb_sockdata_t *sd = CTX_SD(ctx);
    int rv;

    if (ctx->output && ctx->output->rb.nbytes) {
        /** Schedule a write */
        lcb_IOV iov[2];
        unsigned niov;
        ringbuffer_get_iov(&ctx->output->rb, RINGBUFFER_READ, iov);
        /* Only pass the second IOV when the ring buffer wraps */
        niov = iov[1].iov_len ? 2 : 1;
        rv = IOT_V1(io).write2(IOT_ARG(io), sd, iov, niov, ctx->output, Cw_handler);
        if (rv) {
            lcbio_ctx_senderr(ctx, convert_lcberr(ctx, LCBIO_IOERR));
            return;
        } else {
            /* Buffer ownership passes to the IO plugin until Cw_handler
             * fires; count it as a pending operation */
            ctx->output = NULL;
            ctx->npending++;
        }
    }

    if (ctx->wwant) {
        /* Clear the flag before the callback so a re-request from
         * inside cb_flush_ready is seen on the next schedule */
        ctx->wwant = 0;
        ctx->procs.cb_flush_ready(ctx);
    }

    if (ctx->rdwant && sd->is_reading == 0) {
        /* Arm a read: obtain writable IOVs from the read buffer */
        lcb_IOV iov[RWINL_IOVSIZE];
        unsigned ii;
        unsigned niov = rdb_rdstart(&ctx->ior, (nb_IOV *)iov, RWINL_IOVSIZE);
        assert(niov);
        for (ii = 0; ii < niov; ++ii) {
            assert(iov[ii].iov_len);
        }
        rv = IOT_V1(io).read2(IOT_ARG(io), sd, iov, niov, ctx, Cr_handler);
        if (rv) {
            lcbio_ctx_senderr(ctx, convert_lcberr(ctx, LCBIO_IOERR));
        } else {
            sd->is_reading = 1;
            ctx->npending++;
        }
    }
}
/**
 * Request a read of data into the buffer
 * @param conn the connection object
 * @param buf a ringbuffer structure. If the read request is successful,
 * the ringbuffer is destroyed. Its allocated data is owned by the IO plugin
 * for the duration of the operation. It may be restored via
 * ringbuffer_take_buffer once the operation has finished.
 * @param callback invoked by the plugin when the read completes
 * @param error_callback invoked asynchronously if the read could not start
 * @return LCB_SOCKRW_PENDING if a read is in flight (new or pre-existing),
 *         LCB_SOCKRW_GENERIC_ERROR if the buffer could not be grown,
 *         LCB_SOCKRW_IO_ERROR if the plugin rejected the read
 */
lcb_sockrw_status_t lcb_sockrw_v1_start_read(lcb_connection_t conn,
                                             ringbuffer_t **buf,
                                             lcb_io_read_cb callback,
                                             lcb_io_error_cb error_callback)
{
    int ret;
    lcb_io_opt_t io;
    struct lcb_buf_info *bi = &conn->sockptr->read_buffer;

    if (conn->sockptr->is_reading) {
        return LCB_SOCKRW_PENDING;
    }

    /* Check the result (previously ignored); the v0 read path already
     * treats a failed ensure_capacity as a generic error */
    if (!ringbuffer_ensure_capacity(*buf, conn->settings ? conn->settings->rbufsize :
                                    LCB_DEFAULT_RBUFSIZE)) {
        return LCB_SOCKRW_GENERIC_ERROR;
    }

    ringbuffer_get_iov(*buf, RINGBUFFER_WRITE, bi->iov);

    /* Hand the buffer over to the plugin for the duration of the read */
    lcb_assert(bi->ringbuffer == NULL);
    lcb_assert(bi->root == NULL);
    bi->ringbuffer = *buf;
    bi->root = bi->ringbuffer->root;

    *buf = NULL;

    io = conn->io;
    ret = io->v.v1.start_read(io, conn->sockptr, callback);

    if (ret == 0) {
        conn->sockptr->is_reading = 1;
        return LCB_SOCKRW_PENDING;
    } else {
        /* Failure: reclaim the buffer and report asynchronously */
        *buf = bi->ringbuffer;
        memset(bi, 0, sizeof(*bi));
        if (error_callback) {
            io->v.v1.send_error(io, conn->sockptr, error_callback);
        }
    }

    return LCB_SOCKRW_IO_ERROR;
}
/**
 * Request that a write begin.
 * @param conn the connection object
 * @param buf a pointer to a ringbuffer_t*. If the write request is successful,
 * the IO system takes exclusive ownership of the buffer, and the contents
 * of *buf are zeroed.
 * @param callback invoked by the plugin when the write completes
 * @param error_callback invoked asynchronously if the write could not start
 * @return LCB_SOCKRW_PENDING on success, LCB_SOCKRW_GENERIC_ERROR if a
 *         write frame could not be created, LCB_SOCKRW_IO_ERROR if the
 *         plugin rejected the write
 */
lcb_sockrw_status_t lcb_sockrw_v1_start_write(lcb_connection_t conn,
                                              ringbuffer_t **buf,
                                              lcb_io_write_cb callback,
                                              lcb_io_error_cb error_callback)
{
    lcb_io_opt_t iops = conn->io;
    lcb_io_writebuf_t *frame;
    struct lcb_buf_info *info;
    int status;

    frame = iops->v.v1.create_writebuf(iops, conn->sockptr);
    if (frame == NULL) {
        return LCB_SOCKRW_GENERIC_ERROR;
    }

    /* Move ownership of the caller's ring buffer into the write frame */
    info = &frame->buffer;
    info->ringbuffer = *buf;
    info->root = info->ringbuffer->root;
    *buf = NULL;
    ringbuffer_get_iov(info->ringbuffer, RINGBUFFER_READ, info->iov);

    status = iops->v.v1.start_write(iops, conn->sockptr, frame, callback);
    if (status == 0) {
        return LCB_SOCKRW_PENDING;
    }

    /* Failure: give the buffer back, free the frame, and report */
    *buf = info->ringbuffer;
    memset(info, 0, sizeof(*info));
    iops->v.v1.release_writebuf(iops, conn->sockptr, frame);
    lcb_assert(error_callback);
    iops->v.v1.send_error(iops, conn->sockptr, error_callback);
    return LCB_SOCKRW_IO_ERROR;
}
/**
 * Perform a single non-blocking read into `buf` (v0 model).
 *
 * @param conn connection whose socket is read
 * @param buf ring buffer receiving the data; grown to the configured
 *        read-buffer size first
 * @return LCB_SOCKRW_READ on success (or EINTR, which is treated as a
 *         soft retry), LCB_SOCKRW_WOULDBLOCK if no data is available,
 *         LCB_SOCKRW_SHUTDOWN if the peer closed the connection,
 *         LCB_SOCKRW_IO_ERROR on any other socket error, and
 *         LCB_SOCKRW_GENERIC_ERROR if the buffer could not be grown
 */
lcb_sockrw_status_t lcb_sockrw_v0_read(lcb_connection_t conn, ringbuffer_t *buf)
{
    struct lcb_iovec_st iov[2];
    lcb_ssize_t nr;
    lcb_io_opt_t io = conn->io;

    if (!ringbuffer_ensure_capacity(buf, conn->settings ? conn->settings->rbufsize :
                                    LCB_DEFAULT_RBUFSIZE)) {
        return LCB_SOCKRW_GENERIC_ERROR;
    }

    ringbuffer_get_iov(buf, RINGBUFFER_WRITE, iov);

    nr = io->v.v0.recvv(io, conn->sockfd, iov, 2);
    if (nr == -1) {
        switch (io->v.v0.error) {
        case EINTR:
            break;
        case EWOULDBLOCK:
#ifdef USE_EAGAIN
        case EAGAIN:
#endif
            return LCB_SOCKRW_WOULDBLOCK;
        default:
            /* (removed an unreachable, wrongly-typed `return -1;` that
             * followed this statement) */
            return LCB_SOCKRW_IO_ERROR;
        }
    } else if (nr == 0) {
        lcb_assert((iov[0].iov_len + iov[1].iov_len) != 0);
        /* TODO stash error message somewhere
         * "Connection closed... we should resend to other nodes or reconnect!!" */
        return LCB_SOCKRW_SHUTDOWN;
    } else {
        ringbuffer_produced(buf, (lcb_size_t)nr);
    }

    return LCB_SOCKRW_READ;
}