/*
 * Fill the ring buffer dst with up to len copies of the byte c,
 * clamped to the buffer's total size. If the fill exceeds the free
 * space, the tail is advanced so the buffer is left full.
 * Returns the number of bytes written.
 */
size_t ringbuf_memset(ringbuf_t dst, int c, size_t len) {
    const uint8_t* bufend = ringbuf_end(dst);
    size_t count = LWIP_MIN(len, ringbuf_buffer_size(dst));
    int overflow = count > ringbuf_bytes_free(dst);
    size_t nwritten = 0;

    while (nwritten < count) {
        /* never fill past the physical end of the buffer */
        lwIP_ASSERT(bufend > dst->head);
        size_t chunk = LWIP_MIN(bufend - dst->head, count - nwritten);
        os_memset(dst->head, c, chunk);
        dst->head += chunk;
        nwritten += chunk;
        if (dst->head == bufend) {
            dst->head = dst->buf; /* wrap */
        }
    }

    if (overflow) {
        /* old data was clobbered: tail chases head, buffer reads full */
        dst->tail = ringbuf_nextp(dst, dst->head);
        lwIP_ASSERT(ringbuf_is_full(dst));
    }
    return nwritten;
}
/*
 * Copy count bytes from the flat buffer src into the ring buffer dst,
 * wrapping at the physical end of the buffer. If the copy overruns the
 * logical free space, the tail is advanced and the buffer is left full.
 * Returns dst's new head pointer.
 */
void* ringbuf_memcpy_into(ringbuf_t dst, const void* src, size_t count) {
    const uint8_t* from = src;
    const uint8_t* bufend = ringbuf_end(dst);
    int overflow = count > ringbuf_bytes_free(dst);
    size_t done = 0;

    while (done < count) {
        /* copy only the contiguous run up to the wrap point */
        lwIP_ASSERT(bufend > dst->head);
        size_t chunk = LWIP_MIN(bufend - dst->head, count - done);
        memcpy(dst->head, from + done, chunk);
        dst->head += chunk;
        done += chunk;
        if (dst->head == bufend) {
            dst->head = dst->buf; /* wrap */
        }
    }

    if (overflow) {
        /* unread data was overwritten: fix up the tail */
        dst->tail = ringbuf_nextp(dst, dst->head);
        lwIP_ASSERT(ringbuf_is_full(dst));
    }
    return dst->head;
}
/*
 * Copy count bytes from flat memory src into ring buffer dst.
 * The copy proceeds in contiguous chunks, wrapping at the physical end
 * of the buffer; if it exceeds the free space, the tail is advanced and
 * dst ends up full. Returns dst's new head pointer.
 */
void *
ringbuf_memcpy_into(ringbuf_t dst, const void *src, size_t count)
{
    const uint8_t *u8src = src;
    const uint8_t *bufend = ringbuf_end(dst);
    int overflow = count > ringbuf_bytes_free(dst);
    size_t copied = 0;

    while (copied < count) {
        /* don't copy beyond the end of the buffer */
        assert(bufend > dst->head);
        size_t chunk = MIN(bufend - dst->head, count - copied);
        memcpy(dst->head, u8src + copied, chunk);
        dst->head += chunk;
        copied += chunk;
        /* wrap? */
        if (dst->head == bufend)
            dst->head = dst->buf;
    }

    if (overflow) {
        /* unread data was overwritten: tail chases head */
        dst->tail = ringbuf_nextp(dst, dst->head);
        assert(ringbuf_is_full(dst));
    }
    return dst->head;
}
/*
 * read(2) from fd into ring buffer rb, at most count bytes and never
 * past the physical end of the buffer (call again to fill more after a
 * wrap). If the read overruns the logical free space, the tail is
 * advanced so the buffer reads as full.
 * Returns whatever read(2) returned (bytes read, 0 on EOF, -1 on error).
 */
ssize_t
ringbuf_read(int fd, ringbuf_t rb, size_t count)
{
    const uint8_t *bufend = ringbuf_end(rb);
    size_t nfree = ringbuf_bytes_free(rb);

    /* clamp to the contiguous region between head and buffer end */
    assert(bufend > rb->head);
    count = MIN(bufend - rb->head, count);

    ssize_t n = read(fd, rb->head, count);
    if (n <= 0)
        return n;

    assert(rb->head + n <= bufend);
    rb->head += n;
    /* wrap? */
    if (rb->head == bufend)
        rb->head = rb->buf;
    /* fix up the tail pointer if an overflow occurred */
    if (n > nfree) {
        rb->tail = ringbuf_nextp(rb, rb->head);
        assert(ringbuf_is_full(rb));
    }
    return n;
}
//Curl pthread void * curl_thread(void * ptr){ file_ctx * ctx = (file_ctx*) ptr; char range[512]; size_t free = 0; while(1){ pthread_mutex_lock(&ctx->writemutex); if(ctx->abort_curl || ctx->thread_done){ pthread_mutex_unlock(&ctx->writemutex); if(ctx->thread_done) return 0;//shutting down thread usleep(100); continue;//let read reset offset } //find out how much data we want to fill buffers free = ringbuf_bytes_free(ctx->rb); if( free == 0 ) continue; //buffer full //limit range to filesize //sprintf(range,"%u-%u", ctx->offset , ctx->offset+MIN(free,ctx->filesize-ctx->offset) -1); /* just request offset to end of file - can waste network bandwidth if a non seqential read happenss */ sprintf(range,"%u-%u", ctx->offset , ctx->filesize -1); fprintf(stderr, "CURL START - range: %s, remaining:%u\n", range, ctx->filesize-ctx->offset); if(ctx->curl){ curl_easy_setopt(ctx->curl, CURLOPT_RANGE, range); curl_easy_perform(ctx->curl); } pthread_mutex_unlock(&ctx->writemutex); } }
/*
 * Write len copies of byte c into the ring buffer, clamped to the
 * buffer's total size. Overflow advances the tail so the buffer is
 * left full. Returns the number of bytes actually written.
 */
size_t
ringbuf_memset(ringbuf_t dst, int c, size_t len)
{
    const uint8_t *bufend = ringbuf_end(dst);
    size_t count = MIN(len, ringbuf_buffer_size(dst));
    int overflow = count > ringbuf_bytes_free(dst);
    size_t filled = 0;

    while (filled < count) {
        /* don't copy beyond the end of the buffer */
        assert(bufend > dst->head);
        size_t chunk = MIN(bufend - dst->head, count - filled);
        memset(dst->head, c, chunk);
        dst->head += chunk;
        filled += chunk;
        /* wrap? */
        if (dst->head == bufend)
            dst->head = dst->buf;
    }

    if (overflow) {
        /* old data was clobbered; tail chases head */
        dst->tail = ringbuf_nextp(dst, dst->head);
        assert(ringbuf_is_full(dst));
    }
    return filled;
}
size_t curl_callback(char *ptr, size_t size, size_t nmemb, void *userdata){ file_ctx * ctx = (file_ctx*)userdata; size_t amount = (size*nmemb); fprintf(stderr,"CURL: %u\n", amount); if(ctx->abort_curl==1){ fprintf(stderr, "CURL ABORT\n"); return 0; } while(ringbuf_bytes_free(ctx->rb) < amount){ usleep(100); //block till we have space } ringbuf_memcpy_into(ctx->rb, ptr, amount); ctx->offset+=amount; return amount; }
/*
 * Copy count bytes from ring buffer src into ring buffer dst, consuming
 * them from src (src's tail advances). Returns 0 without copying if src
 * holds fewer than count bytes. If dst lacks free space, dst's tail is
 * advanced past the overwritten data, leaving dst full. On success,
 * returns dst's new head pointer.
 */
void * ringbuf_copy(ringbuf_t dst, ringbuf_t src, size_t count)
{
    size_t src_bytes_used = ringbuf_bytes_used(src);
    /* not enough data in src to satisfy the request */
    if (count > src_bytes_used)
        return 0;
    int overflow = count > ringbuf_bytes_free(dst);
    const uint8_t *src_bufend = ringbuf_end(src);
    const uint8_t *dst_bufend = ringbuf_end(dst);
    size_t ncopied = 0;
    while (ncopied != count) {
        /* each memcpy chunk is limited by the contiguous run remaining
           in BOTH buffers before their respective wrap points */
        assert(src_bufend > src->tail);
        size_t nsrc = MIN(src_bufend - src->tail, count - ncopied);
        assert(dst_bufend > dst->head);
        size_t n = MIN(dst_bufend - dst->head, nsrc);
        memcpy(dst->head, src->tail, n);
        src->tail += n;
        dst->head += n;
        ncopied += n;
        /* wrap ? */
        if (src->tail == src_bufend)
            src->tail = src->buf;
        if (dst->head == dst_bufend)
            dst->head = dst->buf;
    }
    /* exactly count bytes must have been consumed from src */
    assert(count + ringbuf_bytes_used(src) == src_bytes_used);
    if (overflow) {
        /* dst overflowed: advance its tail so it reads as full */
        dst->tail = ringbuf_nextp(dst, dst->head);
        assert(ringbuf_is_full(dst));
    }
    return dst->head;
}
/* A ring buffer is full when no free bytes remain. */
int ringbuf_is_full(const struct ringbuf_t* rb) {
    return 0 == ringbuf_bytes_free(rb);
}
/* Bytes currently stored in the buffer: capacity minus free space. */
size_t ringbuf_bytes_used(const struct ringbuf_t* rb) {
    size_t free_bytes = ringbuf_bytes_free(rb);
    return ringbuf_capacity(rb) - free_bytes;
}
/* A ring buffer is empty when every byte of its capacity is free. */
int ringbuf_is_empty(const struct ringbuf_t* rb) {
    size_t cap = ringbuf_capacity(rb);
    return ringbuf_bytes_free(rb) == cap;
}
/* Empty means all of the buffer's capacity is still free. */
int ICACHE_FLASH_ATTR
ringbuf_is_empty(const struct ringbuf_t *rb)
{
    size_t cap = ringbuf_capacity(rb);
    return ringbuf_bytes_free(rb) == cap;
}
/* Full means not a single free byte is left. */
int ICACHE_FLASH_ATTR
ringbuf_is_full(const struct ringbuf_t *rb)
{
    return 0 == ringbuf_bytes_free(rb);
}
/* Occupied byte count: total capacity minus whatever is still free. */
size_t ICACHE_FLASH_ATTR
ringbuf_bytes_used(const struct ringbuf_t *rb)
{
    size_t free_bytes = ringbuf_bytes_free(rb);
    return ringbuf_capacity(rb) - free_bytes;
}
/*
 * libev I/O callback for an echo connection.
 *
 * Write side (EV_WRITE): flush the pending message (buf->msg_len bytes)
 * from the ring buffer to the socket, re-enabling reads if the buffer
 * had been full; once flushed, look for the next complete message
 * (terminated by MSG_DELIMITER) and adjust the watcher events.
 *
 * Read side (EV_READ): pull bytes from the socket into the ring buffer,
 * handling EOF and errors; when a complete message appears, enable
 * writes. If the buffer fills with no complete message, that's a read
 * overflow and the connection is dropped.
 */
void
echo_cb(EV_P_ ev_io *w_, int revents)
{
    log(LOG_DEBUG, "echo_cb called");
    echo_io *w = (echo_io *) w_;
    msg_buf *buf = &w->buf;

    if (revents & EV_WRITE) {
        log(LOG_DEBUG, "echo_cb write event");
        /* remember whether reads were paused by buffer pressure */
        bool buf_is_full = ringbuf_is_full(buf->rb);
        while (buf->msg_len) {
            ssize_t n = ringbuf_write(w->io.fd, buf->rb, buf->msg_len);
            if (n == -1) {
                /* retryable conditions: try again on the next event */
                if ((errno == EAGAIN) ||
                    (errno == EWOULDBLOCK) ||
                    (errno == EINTR))
                    break;
                else {
                    log(LOG_ERR,
                        "Write on descriptor %d failed: %m",
                        w->io.fd);
                    stop_echo_watcher(EV_A_ w);
                    return;
                }
            } else {
                buf->msg_len -= n;
                w->timeout.last_activity = ev_now(EV_A);
                log(LOG_DEBUG, "echo_cb %zd bytes written", n);

                /*
                 * Re-enable reads if they're paused due to buffer
                 * pressure.
                 */
                if (buf_is_full && !w->half_closed) {
                    log(LOG_DEBUG, "echo_cb re-starting reads.");
                    reset_echo_watcher(EV_A_ &w->io, EV_READ | EV_WRITE);
                    buf_is_full = false;
                }
            }
        }

        if (buf->msg_len == 0) {
            /* current message fully flushed: look for the next one */
            size_t eol = ringbuf_findchr(buf->rb,
                                         MSG_DELIMITER,
                                         buf->search_offset);
            if (eol < ringbuf_bytes_used(buf->rb)) {
                /* another complete message is already buffered */
                buf->search_offset = 0;
                buf->msg_len = eol + 1;
            } else {
                if (w->half_closed)
                    /* peer sent EOF and nothing is left to write */
                    stop_echo_watcher(EV_A_ w);
                else {
                    /* resume the delimiter search where it stopped */
                    buf->search_offset = eol;
                    reset_echo_watcher(EV_A_ &w->io, EV_READ);
                }
            }
        }
    }

    if (revents & EV_READ) {
        log(LOG_DEBUG, "echo_cb read event");
        size_t nread = 0;
        while (ringbuf_bytes_free(buf->rb)) {
            ssize_t n = ringbuf_read(w->io.fd,
                                     buf->rb,
                                     ringbuf_bytes_free(buf->rb));
            if (n == 0) {
                /* EOF: drain remaining writes or close connection */
                log(LOG_DEBUG, "echo_cb EOF received");
                w->timeout.last_activity = ev_now(EV_A);
                if (buf->msg_len) {
                    w->half_closed = true;
                    reset_echo_watcher(EV_A_ &w->io, EV_WRITE);
                } else
                    stop_echo_watcher(EV_A_ w);
                return;
            } else if (n == -1) {
                if ((errno == EAGAIN) ||
                    (errno == EWOULDBLOCK) ||
                    (errno == EINTR)) {
                    /* Nothing more to read for now. */
                    return;
                } else {
                    log(LOG_ERR,
                        "Read on descriptor %d failed: %m",
                        w->io.fd);
                    stop_echo_watcher(EV_A_ w);
                    return;
                }
            } else {
                nread += n;
                w->timeout.last_activity = ev_now(EV_A);
                log(LOG_DEBUG, "echo_cb %zd bytes read", n);

                /*
                 * If there's no pending message to send, look for a
                 * new one. If found, enable writes.
                 */
                if (buf->msg_len == 0) {
                    size_t eol = ringbuf_findchr(buf->rb,
                                                 MSG_DELIMITER,
                                                 buf->search_offset);
                    if (eol < ringbuf_bytes_used(buf->rb)) {
                        buf->search_offset = 0;
                        buf->msg_len = eol + 1;
                        reset_echo_watcher(EV_A_ &w->io,
                                           EV_WRITE | EV_READ);
                    } else
                        buf->search_offset = eol;
                }
            }
        }

        /*
         * If we get here, the buffer is full. If there's a pending
         * message waiting to be written, disable reads until the
         * writes free up space. If there's no pending message, we've
         * overflowed.
         */
        if (buf->msg_len) {
            log(LOG_DEBUG,
                "echo_cb buffer full, disabling reads on fd %d.",
                w->io.fd);
            reset_echo_watcher(EV_A_ &w->io, EV_WRITE);
        } else {
            log(LOG_WARNING, "Read overflow on descriptor %d.", w->io.fd);
            stop_echo_watcher(EV_A_ w);
        }
    }
}