void *
ringbuf_memcpy_from(void *dst, ringbuf_t src, size_t count, bool destroy)
{
    size_t bytes_used = ringbuf_bytes_used(src);
    if (count > bytes_used)
        return 0;

    uint8_t *u8dst = dst;
    const uint8_t *bufend = ringbuf_end(src);
    uint8_t *tail = src->tail;
    size_t nwritten = 0;
    while (nwritten != count) {
        assert(bufend > src->tail);
        size_t n = MIN(bufend - src->tail, count - nwritten);
        memcpy(u8dst + nwritten, src->tail, n);
        src->tail += n;
        nwritten += n;

        /* wrap? */
        if (src->tail == bufend)
            src->tail = src->buf;
    }

    if (!destroy) {
        src->tail = tail;
        assert(ringbuf_bytes_used(src) == bytes_used);
    } else {
        assert(count + ringbuf_bytes_used(src) == bytes_used);
    }

    return src->tail;
}
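/*
 * A minimal usage sketch (not from the original source) for the
 * destroy-flag variant above: destroy = false peeks at the buffered data
 * without consuming it, destroy = true advances the tail. ringbuf_new(),
 * ringbuf_memcpy_into() and ringbuf_free() are assumed from the same
 * ring-buffer API; the sizes are illustrative.
 */
void
memcpy_from_example(void)
{
    ringbuf_t rb = ringbuf_new(16);
    uint8_t out[16];

    ringbuf_memcpy_into(rb, "hello", 5);

    /* Peek: the data remains buffered afterwards. */
    ringbuf_memcpy_from(out, rb, 5, false);
    assert(ringbuf_bytes_used(rb) == 5);

    /* Consume: the same bytes are now removed from the buffer. */
    ringbuf_memcpy_from(out, rb, 5, true);
    assert(ringbuf_bytes_used(rb) == 0);

    ringbuf_free(&rb);
}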
void* ringbuf_memcpy_from(void* dst, ringbuf_t src, size_t count)
{
    size_t bytes_used = ringbuf_bytes_used(src);
    if (count > bytes_used) {
        return NULL;
    }

    uint8_t* u8dst = dst;
    const uint8_t* bufend = ringbuf_end(src);
    size_t nwritten = 0;
    while (nwritten != count) {
        LWIP_ASSERT("tail is within the buffer", bufend > src->tail);
        size_t n = LWIP_MIN(bufend - src->tail, count - nwritten);
        memcpy(u8dst + nwritten, src->tail, n);
        src->tail += n;
        nwritten += n;

        /* wrap? */
        if (src->tail == bufend) {
            src->tail = src->buf;
        }
    }

    LWIP_ASSERT("exactly count bytes were consumed",
                count + ringbuf_bytes_used(src) == bytes_used);
    return src->tail;
}
void *
ringbuf_copy(ringbuf_t dst, ringbuf_t src, size_t count)
{
    size_t src_bytes_used = ringbuf_bytes_used(src);
    if (count > src_bytes_used)
        return 0;
    int overflow = count > ringbuf_bytes_free(dst);

    const uint8_t *src_bufend = ringbuf_end(src);
    const uint8_t *dst_bufend = ringbuf_end(dst);
    size_t ncopied = 0;
    while (ncopied != count) {
        assert(src_bufend > src->tail);
        size_t nsrc = MIN(src_bufend - src->tail, count - ncopied);
        assert(dst_bufend > dst->head);
        size_t n = MIN(dst_bufend - dst->head, nsrc);
        memcpy(dst->head, src->tail, n);
        src->tail += n;
        dst->head += n;
        ncopied += n;

        /* wrap? */
        if (src->tail == src_bufend)
            src->tail = src->buf;
        if (dst->head == dst_bufend)
            dst->head = dst->buf;
    }

    assert(count + ringbuf_bytes_used(src) == src_bytes_used);

    if (overflow) {
        dst->tail = ringbuf_nextp(dst, dst->head);
        assert(ringbuf_is_full(dst));
    }

    return dst->head;
}
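/*
 * A hedged sketch (not from the original source) of ringbuf_copy's
 * overwrite semantics: when the destination has too little free space,
 * the copy still succeeds and the destination's oldest bytes are
 * overwritten (its tail is advanced and the buffer reports full).
 * ringbuf_new()/ringbuf_memcpy_into()/ringbuf_free() are assumed from
 * the same ring-buffer API; the sizes are illustrative.
 */
void
copy_overflow_example(void)
{
    ringbuf_t src = ringbuf_new(8);
    ringbuf_t dst = ringbuf_new(4);

    ringbuf_memcpy_into(src, "abcdef", 6);
    ringbuf_copy(dst, src, 6);            /* 6 > 4 free: oldest bytes are dropped */

    assert(ringbuf_is_full(dst));         /* dst now holds "cdef" */
    assert(ringbuf_bytes_used(src) == 0); /* src was fully drained */

    ringbuf_free(&src);
    ringbuf_free(&dst);
}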
static int acd_read(const char *path, char *buf, size_t size, off_t offset,
                    struct fuse_file_info *fi)
{
    file_ctx *ctx = (file_ctx *) fi->fh;
    pthread_mutex_lock(&ctx->readmutex); //used for async reads - prevents two reads at the same time

    //check whether the read is backwards.
    size_t bufoffset = (ctx->offset - ringbuf_bytes_used(ctx->rb)); //file offset of the buffer
    //if the buffer is ahead of the read || the read is ahead of the buffer
    if (offset != bufoffset) {
        //non-continuous read
        ctx->abort_curl = 1; //stop curl callbacks
        ringbuf_reset(ctx->rb); //empty the current buffer
        pthread_mutex_lock(&ctx->writemutex); //lock the write thread
        fprintf(stderr, "READ Reset: http offset: %jd, buffered: %zu, read offset: %jd\n",
                (intmax_t) ctx->offset, ringbuf_bytes_used(ctx->rb), (intmax_t) offset);
        ctx->offset = offset; //set the new offset for http reads
        ringbuf_reset(ctx->rb); //empty the current buffer
        ctx->abort_curl = 0; //allow callbacks
        pthread_mutex_unlock(&ctx->writemutex);
    }
    fprintf(stderr, "READ: %s, size:%zu, off:%jd\n", path, size, (intmax_t) offset);

    size = MIN(size, ctx->filesize - offset); //don't read past the end of the file
    while (ringbuf_bytes_used(ctx->rb) < size) {
        //fprintf(stderr, "."); //block until we have enough data
        usleep(100);
    }

    //copy across the data
    ringbuf_memcpy_from(buf, ctx->rb, size);
    pthread_mutex_unlock(&ctx->readmutex);
    return size;
}
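/*
 * Design note: the usleep() loop in acd_read() above polls until the
 * curl download thread has buffered enough data. A hedged alternative
 * sketch: if the write callback signaled a condition variable after each
 * ringbuf_memcpy_into(), the reader could sleep until the bytes arrive.
 * The data_cond field and wait_for_bytes() helper are hypothetical and
 * not part of the original file_ctx.
 */
static void
wait_for_bytes(file_ctx *ctx, size_t want)
{
    pthread_mutex_lock(&ctx->writemutex);
    while (ringbuf_bytes_used(ctx->rb) < want)
        pthread_cond_wait(&ctx->data_cond, &ctx->writemutex); /* hypothetical field */
    pthread_mutex_unlock(&ctx->writemutex);
}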
ssize_t
ringbuf_write(int fd, ringbuf_t rb, size_t count)
{
    size_t bytes_used = ringbuf_bytes_used(rb);
    if (count > bytes_used)
        return 0;

    const uint8_t *bufend = ringbuf_end(rb);
    assert(bufend > rb->head);
    count = MIN(bufend - rb->tail, count);
    ssize_t n = write(fd, rb->tail, count);
    if (n > 0) {
        assert(rb->tail + n <= bufend);
        rb->tail += n;

        /* wrap? */
        if (rb->tail == bufend)
            rb->tail = rb->buf;

        assert(n + ringbuf_bytes_used(rb) == bytes_used);
    }

    return n;
}
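/*
 * A minimal sketch (not from the original source) of draining a ring
 * buffer to a file descriptor with ringbuf_write(). The function writes
 * at most the contiguous region before the buffer wraps, and write(2)
 * may itself transfer fewer bytes than requested, so it is called in a
 * loop until the buffer is empty or an error occurs.
 */
ssize_t
drain_to_fd(int fd, ringbuf_t rb)
{
    ssize_t total = 0;
    while (ringbuf_bytes_used(rb) > 0) {
        ssize_t n = ringbuf_write(fd, rb, ringbuf_bytes_used(rb));
        if (n < 0) {
            if (errno == EINTR)
                continue;              /* interrupted: retry */
            return total ? total : -1; /* report progress, or the error */
        }
        if (n == 0)
            break;                     /* nothing written; avoid spinning */
        total += n;
    }
    return total;
}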
size_t ringbuf_findchr(const struct ringbuf_t *rb, int c, size_t offset)
{
    const uint8_t *bufend = ringbuf_end(rb);
    size_t bytes_used = ringbuf_bytes_used(rb);
    if (offset >= bytes_used)
        return bytes_used;

    const uint8_t *start = rb->buf +
        (((rb->tail - rb->buf) + offset) % ringbuf_buffer_size(rb));
    LWIP_ASSERT("start is within the buffer", bufend > start);
    size_t n = LWIP_MIN(bufend - start, bytes_used - offset);
    const uint8_t *found = (const uint8_t *) memchr(start, c, n);
    if (found)
        return offset + (found - start);
    else
        return ringbuf_findchr(rb, c, offset + n);
}
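/*
 * A hedged usage sketch (not from the original source) for
 * ringbuf_findchr(): extract one newline-terminated message from the
 * buffer, if a complete one is present. ringbuf_memcpy_from() here is
 * the three-argument (consuming) variant shown earlier.
 */
size_t
next_line(ringbuf_t rb, char *msg, size_t msg_size)
{
    /* Offset of the delimiter, or bytes_used if none is buffered yet. */
    size_t eol = ringbuf_findchr(rb, '\n', 0);
    if (eol >= ringbuf_bytes_used(rb))
        return 0;                       /* no complete message yet */

    size_t len = eol + 1;               /* include the delimiter */
    if (len > msg_size)
        return 0;                       /* caller's buffer is too small */

    ringbuf_memcpy_from(msg, rb, len);  /* consume the message */
    return len;
}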
void
echo_cb(EV_P_ ev_io *w_, int revents)
{
    log(LOG_DEBUG, "echo_cb called");

    echo_io *w = (echo_io *) w_;
    msg_buf *buf = &w->buf;

    if (revents & EV_WRITE) {
        log(LOG_DEBUG, "echo_cb write event");
        bool buf_is_full = ringbuf_is_full(buf->rb);
        while (buf->msg_len) {
            ssize_t n = ringbuf_write(w->io.fd, buf->rb, buf->msg_len);
            if (n == -1) {
                if ((errno == EAGAIN) ||
                    (errno == EWOULDBLOCK) ||
                    (errno == EINTR))
                    break;
                else {
                    log(LOG_ERR, "Write on descriptor %d failed: %m", w->io.fd);
                    stop_echo_watcher(EV_A_ w);
                    return;
                }
            } else {
                buf->msg_len -= n;
                w->timeout.last_activity = ev_now(EV_A);
                log(LOG_DEBUG, "echo_cb %zd bytes written", n);

                /*
                 * Re-enable reads if they're paused due to buffer
                 * pressure.
                 */
                if (buf_is_full && !w->half_closed) {
                    log(LOG_DEBUG, "echo_cb re-starting reads.");
                    reset_echo_watcher(EV_A_ &w->io, EV_READ | EV_WRITE);
                    buf_is_full = false;
                }
            }
        }

        if (buf->msg_len == 0) {
            size_t eol = ringbuf_findchr(buf->rb,
                                         MSG_DELIMITER,
                                         buf->search_offset);
            if (eol < ringbuf_bytes_used(buf->rb)) {
                buf->search_offset = 0;
                buf->msg_len = eol + 1;
            } else {
                if (w->half_closed)
                    stop_echo_watcher(EV_A_ w);
                else {
                    buf->search_offset = eol;
                    reset_echo_watcher(EV_A_ &w->io, EV_READ);
                }
            }
        }
    }

    if (revents & EV_READ) {
        log(LOG_DEBUG, "echo_cb read event");
        size_t nread = 0;
        while (ringbuf_bytes_free(buf->rb)) {
            ssize_t n = ringbuf_read(w->io.fd,
                                     buf->rb,
                                     ringbuf_bytes_free(buf->rb));
            if (n == 0) {
                /* EOF: drain remaining writes or close connection */
                log(LOG_DEBUG, "echo_cb EOF received");
                w->timeout.last_activity = ev_now(EV_A);
                if (buf->msg_len) {
                    w->half_closed = true;
                    reset_echo_watcher(EV_A_ &w->io, EV_WRITE);
                } else
                    stop_echo_watcher(EV_A_ w);
                return;
            } else if (n == -1) {
                if ((errno == EAGAIN) ||
                    (errno == EWOULDBLOCK) ||
                    (errno == EINTR)) {
                    /* Nothing more to read for now. */
                    return;
                } else {
                    log(LOG_ERR, "Read on descriptor %d failed: %m", w->io.fd);
                    stop_echo_watcher(EV_A_ w);
                    return;
                }
            } else {
                nread += n;
                w->timeout.last_activity = ev_now(EV_A);
                log(LOG_DEBUG, "echo_cb %zd bytes read", n);

                /*
                 * If there's no pending message to send, look for a
                 * new one. If found, enable writes.
                 */
                if (buf->msg_len == 0) {
                    size_t eol = ringbuf_findchr(buf->rb,
                                                 MSG_DELIMITER,
                                                 buf->search_offset);
                    if (eol < ringbuf_bytes_used(buf->rb)) {
                        buf->search_offset = 0;
                        buf->msg_len = eol + 1;
                        reset_echo_watcher(EV_A_ &w->io, EV_WRITE | EV_READ);
                    } else
                        buf->search_offset = eol;
                }
            }
        }

        /*
         * If we get here, the buffer is full. If there's a pending
         * message waiting to be written, disable reads until the
         * writes free up space. If there's no pending message, we've
         * overflowed.
         */
        if (buf->msg_len) {
            log(LOG_DEBUG,
                "echo_cb buffer full, disabling reads on fd %d.",
                w->io.fd);
            reset_echo_watcher(EV_A_ &w->io, EV_WRITE);
        } else {
            log(LOG_WARNING, "Read overflow on descriptor %d.", w->io.fd);
            stop_echo_watcher(EV_A_ w);
        }
    }
}