/**
 * Append exactly `bytes` bytes from the internal read buffer (rbuf) to
 * `pkt_buf`, refilling the buffer from the underlying source as needed.
 *
 * @param pkt_buf destination container; data is appended at its end
 * @param bytes   number of bytes the caller still needs
 * @return true when all requested bytes were delivered; false when the
 *         buffer could not be refilled (nothing more to read).
 *
 * Fix: removed `pkt_buf_idx`, which was incremented but never read
 * (dead local). `window` is now a per-iteration const.
 */
bool SocketManager<B>::read_bytes(B &pkt_buf, size_t bytes) {
    // Loop until the request is satisfied or the source is exhausted.
    while (bytes) {
        // How much buffered data is currently available.
        const size_t window = rbuf.buf_size - rbuf.buf_ptr;
        if (bytes <= window) {
            // Enough buffered data to finish the request in one copy.
            pkt_buf.insert(std::end(pkt_buf),
                           std::begin(rbuf.buf) + rbuf.buf_ptr,
                           std::begin(rbuf.buf) + rbuf.buf_ptr + bytes);
            // Advance past the consumed region.
            rbuf.buf_ptr += bytes;
            return true;
        }
        // Drain whatever is buffered before refilling.
        if (window > 0) {
            pkt_buf.insert(std::end(pkt_buf),
                           std::begin(rbuf.buf) + rbuf.buf_ptr,
                           std::begin(rbuf.buf) + rbuf.buf_size);
            bytes -= window;
        }
        // Refill the buffer (resets rbuf.buf_ptr per the original's
        // comment); false means there is nothing more to read.
        if (!refill_read_buffer()) {
            return false;
        }
    }
    return true;
}
/**
 * Copy up to `size` aggregated, time-binned results from the internal
 * read-ahead buffer (rdbuf_) into the caller-supplied output arrays.
 *
 * @param desttx  output array for bin timestamps (written sequentially)
 * @param destxs  output array for aggregation results (written sequentially)
 * @param size    capacity of both output arrays, in elements
 * @return (status, copied): `status` is AKU_SUCCESS or the error returned
 *         by refill_read_buffer(); `copied` is the number of elements
 *         actually written.
 *
 * NOTE(review): assumes begin_/step_ define the binning grid and that
 * rdbuf_ elements are ordered consistently with dir_ — confirm against
 * the class's construction/refill logic, which is outside this view.
 */
std::tuple<aku_Status, size_t> CombineGroupAggregateOperator::copy_to(aku_Timestamp* desttx, AggregationResult* destxs, size_t size) {
    aku_Status status = AKU_SUCCESS;
    size_t copied = 0;
    // Keep copying until the caller's buffer is full, input is exhausted,
    // or a refill reports an error.
    while (status == AKU_SUCCESS && size > 0) {
        // Elements currently buffered and not yet consumed.
        size_t n = elements_in_rdbuf();
        if (iter_index_ != iter_.size()) {
            // Some source iterators are still live: keep at least one
            // element in reserve, so refill when fewer than two remain.
            if (n < 2) {
                status = refill_read_buffer();
                continue;
            }
            // We can copy last element of the rdbuf_ to the output only if all
            // iterators were consumed! Otherwise invariant will be broken.
            n--;
        } else {
            // All iterators consumed: drain the buffer completely;
            // an empty buffer means we are done.
            if (n == 0) {
                break;
            }
        }
        auto tocopy = std::min(n, size);
        // Copy elements
        for (size_t i = 0; i < tocopy; i++) {
            auto const& bottom = rdbuf_.at(rdpos_);
            rdpos_++;
            const bool forward = dir_ == Direction::FORWARD;
            // Map the element's start time to its bin index relative to
            // begin_, in the scan direction.
            aku_Timestamp bin = forward ? (bottom._begin - begin_) / step_ : (begin_ - bottom._begin) / step_;
            // Emit the bin's canonical timestamp (aligned to the grid),
            // then the aggregated payload.
            *desttx++ = forward ? begin_ + (step_ * bin) : begin_ - (step_ * bin);
            *destxs++ = bottom;
            size--;
        }
        copied += tocopy;
    }
    return std::make_tuple(status, copied);
}