static size_t
copy_evbuffer(struct bufferevent *dst, const struct bufferevent *src)
{
    int n, i;
    size_t written = 0;
    struct evbuffer_iovec *v;
    struct evbuffer_iovec quick_v[5]; /* a vector with 5 elements is usually enough */
    size_t maxlen = dst->wm_write.high - EVBUFFER_LENGTH(dst->output);

    maxlen = EVBUFFER_LENGTH(src->input) > maxlen ?
        maxlen : EVBUFFER_LENGTH(src->input);

    n = evbuffer_peek(src->input, maxlen, NULL, NULL, 0);
    if (n > sizeof(quick_v)/sizeof(struct evbuffer_iovec))
        v = malloc(sizeof(struct evbuffer_iovec)*n);
    else
        v = quick_v;
    n = evbuffer_peek(src->input, maxlen, NULL, v, n);

    for (i = 0; i < n; ++i) {
        size_t len = v[i].iov_len;
        if (written + len > maxlen)
            len = maxlen - written;
        if (bufferevent_write(dst, v[i].iov_base, len))
            break;
        /* We keep track of the bytes written separately; if we don't,
         * we may write more than we need if the last chunk puts
         * us over the limit. */
        written += len;
    }

    if (n > sizeof(quick_v)/sizeof(struct evbuffer_iovec))
        free(v);
    return written;
}
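/* A minimal usage sketch, not taken from the source above: the names
 * relay_pair and relay_readcb are hypothetical. It shows how the
 * copy_evbuffer() defined above could be called from a read callback to
 * forward data toward a second bufferevent, letting the destination's
 * write high-water mark bound how much is copied. Bytes that do not fit
 * simply stay in the source's input buffer; a real relay would also
 * resume copying from the destination's write callback. */
#include <event2/buffer.h>
#include <event2/bufferevent.h>

struct relay_pair {
    struct bufferevent *in;   /* where data arrives */
    struct bufferevent *out;  /* where data should be forwarded */
};

static void relay_readcb(struct bufferevent *bev, void *ctx)
{
    struct relay_pair *pair = (struct relay_pair *)ctx;
    /* Copy whatever fits below the destination's high-water mark;
     * anything left over remains buffered for a later pass. */
    (void)copy_evbuffer(pair->out, bev);
}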
static void ssle_readcb(struct bufferevent *bev, void *ctx) {
    SocketLibEvent* s = reinterpret_cast<SocketLibEvent*>(ctx);
    struct evbuffer *input = bufferevent_get_input(bev);
    SocketEvents* sev = s->sl_->sev;

    if (s->closing_) {
        // The socket is shutting down: discard whatever arrives.
        auto n = evbuffer_get_length(input);
        evbuffer_drain(input, n);
    } else if (sev) {
        const int num_of_vecs = 16;
        evbuffer_iovec v[num_of_vecs];
        while (evbuffer_get_length(input)) {
            int n = evbuffer_peek(input, -1, 0, v, num_of_vecs);
            // evbuffer_peek() may report more extents than we supplied; clamp.
            if (n >= num_of_vecs)
                n = num_of_vecs;
            for (int k = 0; k < n; ++k) {
                auto p = reinterpret_cast<const char*>(v[k].iov_base);
                sev->onSocketRead(s, p, v[k].iov_len);
                evbuffer_drain(input, v[k].iov_len);
            }
        }
    }
}
/* Copy as much of the source bufferevent's input as possible into the
 * destination. If skip is non-zero, the first skip bytes of the source
 * are left out of the copy. */
size_t
copy_evbuffer(struct bufferevent *dst, struct bufferevent *src, size_t skip)
{
    int n, i;
    size_t written = 0;
    struct evbuffer_iovec *v;
    struct evbuffer_iovec quick_v[5]; /* a vector with 5 elements is usually enough */
    struct evbuffer *evbinput = bufferevent_get_input(src);
    size_t maxlen = get_write_hwm(dst) -
        evbuffer_get_length(bufferevent_get_output(dst));

    maxlen = evbuffer_get_length(evbinput) - skip > maxlen ?
        maxlen : evbuffer_get_length(evbinput) - skip;

    n = evbuffer_peek(evbinput, maxlen + skip, NULL, NULL, 0);
    if (n > sizeof(quick_v)/sizeof(struct evbuffer_iovec))
        v = malloc(sizeof(struct evbuffer_iovec)*n);
    else
        v = &quick_v[0];
    n = evbuffer_peek(evbinput, maxlen + skip, NULL, v, n);

    for (i = 0; i < n; ++i) {
        size_t len = v[i].iov_len;
        if (skip >= len) {
            /* This extent lies entirely within the skipped region. */
            skip -= len;
            continue;
        } else {
            len -= skip;
        }
        if (written + len > maxlen)
            len = maxlen - written;
        if (bufferevent_write(dst, (char *)v[i].iov_base + skip, len))
            break;
        skip = 0;
        /* We keep track of the bytes written separately; if we don't,
         * we may write more than we need if the last chunk puts
         * us over the limit. */
        written += len;
    }

    if (v != &quick_v[0])
        free(v);
    return written;
}
static enum bufferevent_filter_result
zlib_output_filter(struct evbuffer *src, struct evbuffer *dst,
    ev_ssize_t lim, enum bufferevent_flush_mode state, void *ctx)
{
    struct evbuffer_iovec v_in[1];
    struct evbuffer_iovec v_out[1];
    int nread, nwrite;
    int res, n;

    z_streamp p = ctx;

    do {
        /* let's do some compression */
        n = evbuffer_peek(src, -1, NULL, v_in, 1);
        if (n) {
            p->avail_in = v_in[0].iov_len;
            p->next_in = v_in[0].iov_base;
        } else {
            p->avail_in = 0;
            p->next_in = 0;
        }

        evbuffer_reserve_space(dst, 4096, v_out, 1);
        p->next_out = v_out[0].iov_base;
        p->avail_out = v_out[0].iov_len;

        /* we need to flush zlib if we got a flush */
        res = deflate(p, getstate(state));

        /* let's figure out how much was compressed */
        nread = v_in[0].iov_len - p->avail_in;
        nwrite = v_out[0].iov_len - p->avail_out;

        evbuffer_drain(src, nread);
        v_out[0].iov_len = nwrite;
        evbuffer_commit_space(dst, v_out, 1);

        if (res == Z_BUF_ERROR) {
            /* We're out of space, or out of input to compress.
             * Only if nwrite == 0 assume the latter. */
            if (nwrite == 0)
                return BEV_NEED_MORE;
        } else {
            assert(res == Z_OK || res == Z_STREAM_END);
        }

    } while (evbuffer_get_length(src) > 0);

    ++outfilter_calls;

    return (BEV_OK);
}
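/* A minimal wiring sketch, an assumption rather than part of the test above:
 * deflate_setup() is a hypothetical helper name. It attaches the compressing
 * output filter with bufferevent_filter_new(); only the write direction is
 * filtered here (NULL input filter means reads pass through unchanged), and
 * the caller keeps ownership of the z_stream. */
#include <string.h>
#include <zlib.h>
#include <event2/bufferevent.h>

static struct bufferevent *
deflate_setup(struct bufferevent *underlying, z_streamp strm)
{
    memset(strm, 0, sizeof(*strm));
    if (deflateInit(strm, Z_DEFAULT_COMPRESSION) != Z_OK)
        return NULL;
    /* Writes are run through zlib_output_filter() before they reach the
     * underlying bufferevent; the z_stream is passed as the filter context. */
    return bufferevent_filter_new(underlying, NULL, zlib_output_filter,
        BEV_OPT_CLOSE_ON_FREE, NULL, strm);
}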
static void pipe_bev_readable(struct bufferevent *bev, void *ctx)
{
    struct pipe_data *data = ctx;
    int avail;
    struct evbuffer *buf;
    struct evbuffer_iovec vec_out;
    err_t ret;
    int wait_for_more = 0;
    u8_t apiflags;

    avail = tcp_sndbuf(data->pcb);
    if (!avail) {
        bufferevent_disable(bev, EV_READ);
        return;
    }

    buf = bufferevent_get_input(data->bev);
    if (avail < evbuffer_get_length(buf))
        wait_for_more = 1;
    else if (avail > evbuffer_get_length(buf))
        avail = evbuffer_get_length(buf);
    if (!avail)
        return;

    evbuffer_pullup(buf, avail);
    evbuffer_peek(buf, avail, NULL, &vec_out, 1);

    apiflags = TCP_WRITE_FLAG_COPY;
    if (wait_for_more)
        apiflags |= TCP_WRITE_FLAG_MORE;

    ret = tcp_write(data->pcb, vec_out.iov_base, avail, apiflags);
    if (ret < 0) {
        bufferevent_disable(bev, EV_READ);
        if (ret != ERR_MEM) {
            pipe_tcp_free(data);
            pipe_bev_flush(data);
        }
    } else {
        evbuffer_drain(buf, avail);
        if (wait_for_more)
            bufferevent_disable(bev, EV_READ);
    }
}
uint64_t first_complete_message_size(const CNetworkConfig& config, evbuffer* input,
                                     bool& fComplete, bool& fBadMsgStart) {
    size_t nTotal = evbuffer_get_length(input);
    uint64_t nMessageSize;
    const int size_needed = config.header_msg_size_offset + config.header_msg_size_size;

    fComplete = false;
    fBadMsgStart = false;

    // Assume 4-bytes until there's a reason not to.
    assert(config.header_msg_size_size == 4);

    if (nTotal < static_cast<size_t>(size_needed))
        return 0;

    evbuffer_iovec v;
    if (evbuffer_peek(input, size_needed, nullptr, &v, 1) == 1) {
        // The header prefix is contiguous in the buffer; read it in place.
        const unsigned char* ptr = static_cast<const unsigned char*>(v.iov_base);
        if (!config.message_start.empty() &&
            memcmp(ptr, &config.message_start[0], config.message_start.size()) != 0) {
            fBadMsgStart = true;
            return 0;
        }
        nMessageSize = get_message_length(ptr + config.header_msg_size_offset) + config.header_size;
    } else {
        // The header prefix spans more than one extent; copy it out first.
        std::vector<unsigned char> partial_header(size_needed);
        int ret = evbuffer_copyout(input, &partial_header[0], size_needed);
        assert(ret == size_needed);
        (void)ret;
        if (!config.message_start.empty() &&
            memcmp(&partial_header[0], &config.message_start[0], config.message_start.size()) != 0) {
            fBadMsgStart = true;
            return 0;
        }
        nMessageSize = get_message_length(&partial_header[0] + config.header_msg_size_offset) + config.header_size;
    }

    if (nTotal >= nMessageSize)
        fComplete = true;
    return nMessageSize;
}
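// A hypothetical read-callback sketch, not from the source above: node_readcb
// and the ctx wiring are made-up names. It shows how first_complete_message_size()
// could drive message framing: peek until a full message is buffered, then
// remove exactly that many bytes from the input buffer.
#include <cstdint>
#include <vector>
#include <event2/buffer.h>
#include <event2/bufferevent.h>

static void node_readcb(struct bufferevent* bev, void* ctx) {
    const CNetworkConfig& config = *static_cast<const CNetworkConfig*>(ctx);
    evbuffer* input = bufferevent_get_input(bev);

    for (;;) {
        bool fComplete = false;
        bool fBadMsgStart = false;
        uint64_t nSize = first_complete_message_size(config, input, fComplete, fBadMsgStart);
        if (fBadMsgStart) {
            // Magic bytes did not match; a real node would disconnect the peer here.
            return;
        }
        if (!fComplete)
            break;  // wait for the rest of the message to arrive
        std::vector<unsigned char> msg(nSize);
        evbuffer_remove(input, msg.data(), nSize);  // consume exactly one framed message
        // ... hand msg to the application's deserializer here ...
    }
}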
static void test_evbuffer_reserve2(void *ptr)
{
    /* Test the two-vector cases of reserve/commit. */
    struct evbuffer *buf = evbuffer_new();
    int n, i;
    struct evbuffer_iovec v[2];
    size_t remaining;
    char *cp, *cp2;

    /* First chunk will necessarily be one chunk. Use 512 bytes of it. */
    n = evbuffer_reserve_space(buf, 1024, v, 2);
    tt_int_op(n, ==, 1);
    tt_int_op(evbuffer_get_length(buf), ==, 0);
    tt_assert(v[0].iov_base != NULL);
    tt_int_op(v[0].iov_len, >=, 1024);
    memset(v[0].iov_base, 'X', 512);
    cp = v[0].iov_base;
    remaining = v[0].iov_len - 512;
    v[0].iov_len = 512;
    tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
    tt_int_op(evbuffer_get_length(buf), ==, 512);

    /* Ask for another same-chunk request, in an existing chunk. Use 8
     * bytes of it. */
    n = evbuffer_reserve_space(buf, 32, v, 2);
    tt_int_op(n, ==, 1);
    tt_assert(cp + 512 == v[0].iov_base);
    tt_int_op(remaining, ==, v[0].iov_len);
    memset(v[0].iov_base, 'Y', 8);
    v[0].iov_len = 8;
    tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
    tt_int_op(evbuffer_get_length(buf), ==, 520);
    remaining -= 8;

    /* Now ask for a request that will be split. Use only one byte of it,
     * though. */
    n = evbuffer_reserve_space(buf, remaining+64, v, 2);
    tt_int_op(n, ==, 2);
    tt_assert(cp + 520 == v[0].iov_base);
    tt_int_op(remaining, ==, v[0].iov_len);
    tt_assert(v[1].iov_base);
    tt_assert(v[1].iov_len >= 64);
    cp2 = v[1].iov_base;
    memset(v[0].iov_base, 'Z', 1);
    v[0].iov_len = 1;
    tt_int_op(0, ==, evbuffer_commit_space(buf, v, 1));
    tt_int_op(evbuffer_get_length(buf), ==, 521);
    remaining -= 1;

    /* Now ask for a request that will be split. Use some of the first
     * part and some of the second. */
    n = evbuffer_reserve_space(buf, remaining+64, v, 2);
    tt_int_op(n, ==, 2);
    tt_assert(cp + 521 == v[0].iov_base);
    tt_int_op(remaining, ==, v[0].iov_len);
    tt_assert(v[1].iov_base == cp2);
    tt_assert(v[1].iov_len >= 64);
    memset(v[0].iov_base, 'W', 400);
    v[0].iov_len = 400;
    memset(v[1].iov_base, 'x', 60);
    v[1].iov_len = 60;
    tt_int_op(0, ==, evbuffer_commit_space(buf, v, 2));
    tt_int_op(evbuffer_get_length(buf), ==, 981);

    /* Now peek to make sure stuff got made how we like. */
    memset(v, 0, sizeof(v));
    n = evbuffer_peek(buf, -1, NULL, v, 2);
    tt_int_op(n, ==, 2);
    tt_int_op(v[0].iov_len, ==, 921);
    tt_int_op(v[1].iov_len, ==, 60);

    cp = v[0].iov_base;
    for (i = 0; i < 512; ++i)
        tt_int_op(cp[i], ==, 'X');
    for (i = 512; i < 520; ++i)
        tt_int_op(cp[i], ==, 'Y');
    for (i = 520; i < 521; ++i)
        tt_int_op(cp[i], ==, 'Z');
    for (i = 521; i < 921; ++i)
        tt_int_op(cp[i], ==, 'W');
    cp = v[1].iov_base;
    for (i = 0; i < 60; ++i)
        tt_int_op(cp[i], ==, 'x');

end:
    evbuffer_free(buf);
}
void TcpTransport::readNextMessageIntCallback(struct bufferevent *bev, void *ctx) {
    /* This callback is invoked when there is data to read on bev. */

    // protocol: <length> <header length> <header data> <body data>
    //              1           2              3             4
    // The RocketMQ wire protocol has 4 parts:
    //   1. a big-endian 4-byte int: the total length of parts 2, 3 and 4
    //   2. a big-endian 4-byte int: the length of part 3
    //   3. the header data, serialized as JSON
    //   4. application-defined binary body data
    struct evbuffer *input = bufferevent_get_input(bev);
    while (1) {
        // Peek at the first 4 bytes (the total-length field) without draining them.
        struct evbuffer_iovec v[4];
        int n = evbuffer_peek(input, 4, NULL, v, sizeof(v) / sizeof(v[0]));

        int idx = 0;
        char hdr[4];
        char *p = hdr;
        unsigned int needed = 4;
        for (idx = 0; idx < n; idx++) {
            if (needed) {
                unsigned int tmp = needed < v[idx].iov_len ? needed : v[idx].iov_len;
                memcpy(p, v[idx].iov_base, tmp);
                p += tmp;
                needed -= tmp;
            } else {
                break;
            }
        }

        if (needed) {
            LOG_DEBUG(" too little data received with sum = %d ", 4 - needed);
            return;
        }

        uint32 totalLenOfOneMsg = *(uint32 *)hdr;  // first 4 bytes: part 1 of the protocol
        uint32 bytesInMessage = ntohl(totalLenOfOneMsg);
        LOG_DEBUG("fd:%d, totalLen:" SIZET_FMT ", bytesInMessage:%d", bufferevent_getfd(bev),
                  v[0].iov_len, bytesInMessage);

        uint32 len = evbuffer_get_length(input);
        if (len >= bytesInMessage + 4) {
            LOG_DEBUG("had received all data with len:%d from fd:%d", len, bufferevent_getfd(bev));
        } else {
            LOG_DEBUG("didn't receive whole bytesInMessage:%d, from fd:%d, totalLen:%d",
                      bytesInMessage, bufferevent_getfd(bev), len);
            return;  // a large message may not have been received completely yet
        }

        if (bytesInMessage > 0) {
            MemoryBlock messageData(bytesInMessage, true);
            uint32 bytesRead = 0;
            char *data = messageData.getData() + bytesRead;
            bufferevent_read(bev, data, 4);  // consume the 4-byte length prefix
            bytesRead = bufferevent_read(bev, data, bytesInMessage);

            TcpTransport *tcpTrans = (TcpTransport *)ctx;
            tcpTrans->messageReceived(messageData);
        }
    }
}