/* Free buffer space by moving the handle struct right before the useful
   part of its data buffer or by moving all the data.

   Metadata-type handles (fully buffered, small) are moved wholesale, data
   included; all other handles only have the struct slid forward to sit just
   before the current read position, discarding already-consumed data. */
static void shrink_handle(struct memory_handle *h)
{
    size_t delta;

    if (!h)
        return;

    if (h->next && h->filerem == 0 &&
        (h->type == TYPE_ID3 || h->type == TYPE_CUESHEET ||
         h->type == TYPE_BITMAP || h->type == TYPE_CODEC ||
         h->type == TYPE_ATOMIC_AUDIO))
    {
        /* metadata handle: we can move all of it */
        /* Distance to the next handle minus our payload = free slack we can
           close up by moving this handle forward. */
        uintptr_t handle_distance =
            ringbuf_sub(ringbuf_offset(h->next), h->data);
        delta = handle_distance - h->available;

        /* The value of delta might change for alignment reasons */
        if (!move_handle(&h, &delta, h->available, h->type==TYPE_CODEC))
            return;

        size_t olddata = h->data;
        h->data = ringbuf_add(h->data, delta);
        h->ridx = ringbuf_add(h->ridx, delta);
        h->widx = ringbuf_add(h->widx, delta);

        if (h->type == TYPE_ID3 && h->filesize == sizeof(struct mp3entry)) {
            /* when moving an mp3entry we need to readjust its pointers. */
            adjust_mp3entry((struct mp3entry *)&buffer[h->data],
                            (void *)&buffer[h->data],
                            (const void *)&buffer[olddata]);
        } else if (h->type == TYPE_BITMAP) {
            /* adjust the bitmap's pointer */
            struct bitmap *bmp = (struct bitmap *)&buffer[h->data];
            bmp->data = &buffer[h->data + sizeof(struct bitmap)];
        }
    }
    else
    {
        /* only move the handle struct */
        /* Slack equals data already read (between data start and ridx). */
        delta = ringbuf_sub(h->ridx, h->data);
        if (!move_handle(&h, &delta, 0, true))
            return;

        h->data = ringbuf_add(h->data, delta);
        h->start = ringbuf_add(h->start, delta);
        h->available -= delta;
        h->offset += delta;
    }
}
/* Centered running-window statistics over a strided column of `data`.

   nrb   - ring-buffer (window) length
   nd    - number of output points
   step  - stride between consecutive elements (for column-major 2-D data)
   ofs   - element offset applied to every array pointer before use
   Output arrays (dmean..ng) receive, per point j, the mean/std/min/max/
   median/5th/95th percentile and good-sample count of the window.

   The window is centered: the first npad = (nrb-1)/2 samples are pre-loaded
   so that reader index `i` leads writer index `j` by npad; past the end of
   the input, NaN is pushed to keep the window sliding.
   NOTE(review): `NaN` is presumably a project macro for a quiet NaN —
   confirm its definition. */
void c_runstats2(int nrb, int nd, int step, int ofs, double *data,
                 double *dmean, double *dstd, double *dmin, double *dmax,
                 double *dmed, double *dptile5, double *dptile95,
                 int *nsorted, int *ng)
{
    int i, j;
    int npad = (nrb - 1) / 2;
    ringbuf_t *rb_ptr;

    /* Shift every pointer to the requested starting element. */
    data += ofs;
    dmean += ofs;
    dstd += ofs;
    dmin += ofs;
    dmax += ofs;
    dmed += ofs;
    dptile5 += ofs;
    dptile95 += ofs;
    nsorted += ofs;
    ng += ofs;

    rb_ptr = new_ringbuf(nrb);

    /* Pre-fill half a window so outputs are centered on their input sample.
       These nsorted entries are overwritten by the main loop below. */
    for (i = 0; i < npad; i++) {
        nsorted[i*step] = ringbuf_add(rb_ptr, data[i*step]);
    }
    /* `i` continues from npad: it reads ahead of the write index `j`. */
    for (j=0; j<nd; i++, j++) {
        if (i < nd) {nsorted[j*step] = ringbuf_add(rb_ptr, data[i*step]);}
        else {nsorted[j*step] = ringbuf_add(rb_ptr, NaN);}  /* pad past end */
        dmean[j*step] = ringbuf_mean(rb_ptr);
        dstd[j*step] = ringbuf_sd(rb_ptr);
        dmin[j*step] = ringbuf_min(rb_ptr);
        dmax[j*step] = ringbuf_max(rb_ptr);
        dmed[j*step] = ringbuf_median(rb_ptr);
        dptile5[j*step] = ringbuf_ptile(rb_ptr, 0.05);
        dptile95[j*step] = ringbuf_ptile(rb_ptr, 0.95);
        ng[j*step] = rb_ptr->N_good;
    }
    delete_ringbuf(rb_ptr);
}
/* Set the reading index of a handle, relative to the start of the file.
   Seeking outside the currently buffered window triggers a rebuffer.
   Returns 0 on success, or a negative error code:
     ERR_HANDLE_NOT_FOUND if the handle doesn't exist
     ERR_INVALID_VALUE    if newpos lies beyond the end of the file */
int bufseek(int handle_id, size_t newpos)
{
    struct memory_handle *h = find_handle(handle_id);

    if (!h)
        return ERR_HANDLE_NOT_FOUND;

    if (newpos > h->filesize)
        return ERR_INVALID_VALUE; /* seek target is past end of file */

    bool in_window = (newpos >= h->offset) &&
                     (newpos <= h->offset + h->available);
    if (in_window) {
        /* Target is already buffered: just reposition the read index. */
        h->ridx = ringbuf_add(h->data, newpos - h->offset);
    } else {
        /* Before or after the buffered data: a rebuffer is required. */
        rebuffer_handle(handle_id, newpos);
    }

    return 0;
}
void c_runstats(int nrb, int nd, double *data, double *dmean, double *dstd, double *dmin, double *dmax, double *dmed, double *dptile5, double *dptile95, int *nsorted, int *ng) { int i, j; ringbuf_t *rb_ptr; rb_ptr = new_ringbuf(nrb); for (j=0; j<nd; i++, j++) { nsorted[j] = ringbuf_add(rb_ptr, data[j]); dmean[j] = ringbuf_mean(rb_ptr); dstd[j] = ringbuf_sd(rb_ptr); dmin[j] = ringbuf_min(rb_ptr); dmax[j] = ringbuf_max(rb_ptr); dmed[j] = ringbuf_median(rb_ptr); dptile5[j] = ringbuf_ptile(rb_ptr, 0.05); dptile95[j] = ringbuf_ptile(rb_ptr, 0.95); ng[j] = rb_ptr->N_good; } delete_ringbuf(rb_ptr); }
/* Reset writing position and data buffer of a handle to its current offset. Use this after having set the new offset to use. */ static void reset_handle(int handle_id) { size_t alignment_pad; logf("reset_handle(%d)", handle_id); struct memory_handle *h = find_handle(handle_id); if (!h) return; /* Align to desired storage alignment */ alignment_pad = STORAGE_OVERLAP(h->offset - (size_t)(&buffer[h->start])); h->ridx = h->widx = h->data = ringbuf_add(h->start, alignment_pad); if (h == cur_handle) buf_widx = h->widx; h->available = 0; h->filerem = h->filesize - h->offset; if (h->fd >= 0) { lseek(h->fd, h->offset, SEEK_SET); } }
/* Record the given position as a mark in the window's jump list. */
static void window_jumplist_add(Win *win, size_t pos)
{
    Mark mark = text_mark_set(win->file->text, pos);
    if (!mark)
        return;
    if (!win->jumplist)
        return;
    ringbuf_add(win->jumplist, mark);
}
/* Append a line to the history ring buffer; returns ringbuf_add's result. */
int brl_add_history(brl_t *brl, const char *str)
{
    int rc = ringbuf_add(&brl->h, str);
    return rc;
}
/* Reserve space in the buffer for a file.
   filename: name of the file to open
   offset: offset at which to start buffering the file, useful when the first
           (offset-1) bytes of the file aren't needed.
   type: one of the data types supported (audio, image, cuesheet, others
   user_data: user data passed possibly passed in subcalls specific to a
              data_type (only used for image (albumart) buffering so far )
   return value: <0 if the file cannot be opened, or one file already
   queued to be opened, otherwise the handle for the file in the buffer
*/
int bufopen(const char *file, size_t offset, enum data_type type,
            void *user_data)
{
#ifndef HAVE_ALBUMART
    /* currently only used for aa loading */
    (void)user_data;
#endif
    if (type == TYPE_ID3)
    {
        /* ID3 case: allocate space, init the handle and return. */
        struct memory_handle *h =
            add_handle(sizeof(struct mp3entry), false, true);
        if (!h)
            return ERR_BUFFER_FULL;

        h->fd = -1;
        h->filesize = sizeof(struct mp3entry);
        h->filerem = sizeof(struct mp3entry);
        h->offset = 0;
        h->data = buf_widx;
        h->ridx = buf_widx;
        h->widx = buf_widx;
        h->available = 0;
        h->type = type;
        strlcpy(h->path, file, MAX_PATH);

        buf_widx += sizeof(struct mp3entry); /* safe because the handle
                                                can't wrap */

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);

        return h->id;
    }

    /* Other cases: there is a little more work. */
    int fd = open(file, O_RDONLY);
    if (fd < 0)
        return ERR_FILE_ERROR;

    size_t size = filesize(fd);
    /* Only streamed audio and codecs may wrap around the ring buffer. */
    bool can_wrap = type==TYPE_PACKET_AUDIO || type==TYPE_CODEC;

    size_t adjusted_offset = offset;
    if (adjusted_offset > size)
        adjusted_offset = 0;

    /* Reserve extra space because alignment can move data forward */
    size_t padded_size = STORAGE_PAD(size-adjusted_offset);
    struct memory_handle *h = add_handle(padded_size, can_wrap, false);
    if (!h)
    {
        DEBUGF("%s(): failed to add handle\n", __func__);
        close(fd);
        return ERR_BUFFER_FULL;
    }

    strlcpy(h->path, file, MAX_PATH);
    h->offset = adjusted_offset;

    /* Don't bother to storage align bitmaps because they are not
     * loaded directly into the buffer.
     */
    if (type != TYPE_BITMAP)
    {
        size_t alignment_pad;

        /* Remember where data area starts, for use by reset_handle */
        h->start = buf_widx;

        /* Align to desired storage alignment */
        alignment_pad =
            STORAGE_OVERLAP(adjusted_offset - (size_t)(&buffer[buf_widx]));
        buf_widx = ringbuf_add(buf_widx, alignment_pad);
    }

    h->ridx = buf_widx;
    h->widx = buf_widx;
    h->data = buf_widx;
    h->available = 0;
    h->filerem = 0;
    h->type = type;

#ifdef HAVE_ALBUMART
    if (type == TYPE_BITMAP)
    {
        /* Bitmap file: we load the data instead of the file */
        int rc;
        mutex_lock(&llist_mod_mutex); /* Lock because load_bitmap yields */
        rc = load_image(fd, file, (struct dim*)user_data);
        mutex_unlock(&llist_mod_mutex);
        if (rc <= 0)
        {
            rm_handle(h);
            close(fd);
            return ERR_FILE_ERROR;
        }
        h->filerem = 0;
        h->filesize = rc;
        h->available = rc;
        h->widx = buf_widx + rc; /* safe because the data doesn't wrap */
        buf_widx += rc; /* safe too */
    }
    else
#endif
    {
        /* Regular file: defer the actual data loading. */
        h->filerem = size - adjusted_offset;
        h->filesize = size;
        h->available = 0;
        h->widx = buf_widx;
    }

    if (type == TYPE_CUESHEET) {
        h->fd = fd;
        /* Immediately start buffering those */
        LOGFQUEUE("buffering >| Q_BUFFER_HANDLE %d", h->id);
        queue_send(&buffering_queue, Q_BUFFER_HANDLE, h->id);
    } else {
        /* Other types will get buffered in the course of normal operations */
        h->fd = -1;
        close(fd);

        /* Inform the buffering thread that we added a handle */
        LOGFQUEUE("buffering > Q_HANDLE_ADDED %d", h->id);
        queue_post(&buffering_queue, Q_HANDLE_ADDED, h->id);
    }

    logf("bufopen: new hdl %d", h->id);
    return h->id;
}
/* Buffer data for the given handle.
   Return whether or not the buffering should continue explicitly
   (false means stop and retry later, e.g. to avoid clobbering a
   neighboring handle). */
static bool buffer_handle(int handle_id)
{
    logf("buffer_handle(%d)", handle_id);
    struct memory_handle *h = find_handle(handle_id);
    bool stop = false;

    if (!h)
        return true;

    if (h->filerem == 0) {
        /* nothing left to buffer */
        return true;
    }

    if (h->fd < 0) /* file closed, reopen */
    {
        if (*h->path)
            h->fd = open(h->path, O_RDONLY);

        if (h->fd < 0)
        {
            /* could not open the file, truncate it where it is */
            h->filesize -= h->filerem;
            h->filerem = 0;
            return true;
        }

        if (h->offset)
            lseek(h->fd, h->offset, SEEK_SET);
    }

    trigger_cpu_boost();

    if (h->type == TYPE_ID3)
    {
        /* ID3 handles are filled by parsing metadata, not by raw reads. */
        if (!get_metadata((struct mp3entry *)(buffer + h->data),
                          h->fd, h->path))
        {
            /* metadata parsing failed: clear the buffer. */
            memset(buffer + h->data, 0, sizeof(struct mp3entry));
        }
        close(h->fd);
        h->fd = -1;
        h->filerem = 0;
        h->available = sizeof(struct mp3entry);
        h->widx += sizeof(struct mp3entry);
        send_event(BUFFER_EVENT_FINISHED, &h->id);
        return true;
    }

    while (h->filerem > 0 && !stop)
    {
        /* max amount to copy */
        size_t copy_n = MIN( MIN(h->filerem, BUFFERING_DEFAULT_FILECHUNK),
                             buffer_len - h->widx);
        ssize_t overlap;
        uintptr_t next_handle = ringbuf_offset(h->next);

        /* stop copying if it would overwrite the reading position */
        if (ringbuf_add_cross(h->widx, copy_n, buf_ridx) >= 0)
            return false;

        /* FIXME: This would overwrite the next handle
         * If this is true, then there's a handle even though we have still
         * data to buffer. This should NEVER EVER happen! (but it does :( ) */
        if (h->next &&
            (overlap = ringbuf_add_cross(h->widx, copy_n, next_handle)) > 0)
        {
            /* stop buffering data for now and post-pone buffering the rest */
            stop = true;
            DEBUGF( "%s(): Preventing handle corruption: h1.id:%d h2.id:%d"
                    " copy_n:%lu overlap:%ld h1.filerem:%lu\n", __func__,
                    h->id, h->next->id, (unsigned long)copy_n, overlap,
                    (unsigned long)h->filerem);
            copy_n -= overlap;
        }

        /* rc is the actual amount read */
        int rc = read(h->fd, &buffer[h->widx], copy_n);

        if (rc < 0)
        {
            /* Some kind of filesystem error, maybe recoverable if not codec */
            if (h->type == TYPE_CODEC) {
                logf("Partial codec");
                break;
            }

            DEBUGF("File ended %ld bytes early\n", (long)h->filerem);
            h->filesize -= h->filerem;
            h->filerem = 0;
            break;
        }

        /* Advance buffer */
        h->widx = ringbuf_add(h->widx, rc);
        if (h == cur_handle)
            buf_widx = h->widx;
        h->available += rc;
        h->filerem -= rc;

        /* If this is a large file, see if we need to break or give the codec
         * more time */
        if (h->type == TYPE_PACKET_AUDIO &&
            pcmbuf_is_lowdata() && !buffer_is_low())
        {
            sleep(1);
        }
        else
        {
            yield();
        }

        /* Abort early if another buffering request is pending. */
        if (!queue_empty(&buffering_queue))
            break;
    }

    if (h->filerem == 0) {
        /* finished buffering the file */
        close(h->fd);
        h->fd = -1;
        send_event(BUFFER_EVENT_FINISHED, &h->id);
    }

    return !stop;
}
/* Move a memory handle and data_size of its data delta bytes along the
   buffer.
   delta     maximum bytes available to move the handle.  If the move is
             performed it is set to the actual distance moved.
   data_size is the amount of data to move along with the struct.
   Returns true if the move is successful; false if the handle is NULL, the
   move would be less than the size of a memory_handle after correcting for
   wraps, or if the handle is not found in the linked list for adjustment.
   This function has no side effects if false is returned. */
static bool move_handle(struct memory_handle **h, size_t *delta,
                        size_t data_size, bool can_wrap)
{
    struct memory_handle *dest;
    const struct memory_handle *src;
    int32_t *here;
    int32_t *there;
    int32_t *end;
    int32_t *begin;
    size_t final_delta = *delta, size_to_move, n;
    uintptr_t oldpos, newpos;
    intptr_t overlap, overlap_old;

    if (h == NULL || (src = *h) == NULL)
        return false;

    size_to_move = sizeof(struct memory_handle) + data_size;

    /* Align to four bytes, down */
    final_delta &= ~3;
    if (final_delta < sizeof(struct memory_handle)) {
        /* It's not legal to move less than the size of the struct */
        return false;
    }

    mutex_lock(&llist_mutex);
    mutex_lock(&llist_mod_mutex);

    oldpos = ringbuf_offset(src);
    newpos = ringbuf_add(oldpos, final_delta);
    /* How far each copy (new and old position) runs past buffer end. */
    overlap = ringbuf_add_cross(newpos, size_to_move, buffer_len - 1);
    overlap_old = ringbuf_add_cross(oldpos, size_to_move, buffer_len -1);

    if (overlap > 0) {
        /* Some part of the struct + data would wrap, maybe ok */
        size_t correction = 0;
        /* If the overlap lands inside the memory_handle */
        if (!can_wrap) {
            /* Otherwise the overlap falls in the data area and must all be
             * backed out.  This may become conditional if ever we move
             * data that is allowed to wrap (ie audio) */
            correction = overlap;
        } else if ((uintptr_t)overlap > data_size) {
            /* Correct the position and real delta to prevent the struct from
             * wrapping, this guarantees an aligned delta, I think */
            correction = overlap - data_size;
        }
        if (correction) {
            /* Align correction to four bytes up */
            correction = (correction + 3) & ~3;
            if (final_delta < correction + sizeof(struct memory_handle)) {
                /* Delta cannot end up less than the size of the struct */
                mutex_unlock(&llist_mod_mutex);
                mutex_unlock(&llist_mutex);
                return false;
            }
            newpos -= correction;
            overlap -= correction;/* Used below to know how to split the data */
            final_delta -= correction;
        }
    }

    dest = (struct memory_handle *)(&buffer[newpos]);

    /* Re-link the list to point at the destination before copying. */
    if (src == first_handle) {
        first_handle = dest;
        buf_ridx = newpos;
    } else {
        struct memory_handle *m = first_handle;
        while (m && m->next != src) {
            m = m->next;
        }
        if (m && m->next == src) {
            m->next = dest;
        } else {
            /* src not found in the list: abort with no modifications made
               beyond the (harmless) searches above. */
            mutex_unlock(&llist_mod_mutex);
            mutex_unlock(&llist_mutex);
            return false;
        }
    }

    /* Update the cache to prevent it from keeping the old location of h */
    if (src == cached_handle)
        cached_handle = dest;

    /* the cur_handle pointer might need updating */
    if (src == cur_handle)
        cur_handle = dest;

    /* Copying routine takes into account that the handles have a
     * distance between each other which is a multiple of four.  Faster 2 word
     * copy may be ok but do this for safety and because wrapped copies should
     * be fairly uncommon */
    /* Copy backwards (high address to low) word by word so an overlapping
       forward move doesn't clobber unread source words. */
    here = (int32_t *)((ringbuf_add(oldpos, size_to_move - 1) & ~3)+
                       (intptr_t)buffer);
    there =(int32_t *)((ringbuf_add(newpos, size_to_move - 1) & ~3)+
                       (intptr_t)buffer);
    end = (int32_t *)(( intptr_t)buffer + buffer_len - 4);
    begin =(int32_t *)buffer;

    n = (size_to_move & ~3)/4;

    if ( overlap_old > 0 || overlap > 0 ) {
        /* Old or moved handle wraps */
        while (n--) {
            if (here < begin)
                here = end;
            if (there < begin)
                there = end;
            *there-- = *here--;
        }
    } else {
        /* both handles do not wrap */
        memmove(dest,src,size_to_move);
    }

    /* Update the caller with the new location of h and the distance moved */
    *h = dest;
    *delta = final_delta;
    mutex_unlock(&llist_mod_mutex);
    mutex_unlock(&llist_mutex);
    return dest;
}
/* Add a new handle to the linked list and return it. It will have become the
   new current handle.
   data_size must contain the size of what will be in the handle.
   can_wrap tells us whether this type of data may wrap on buffer
   alloc_all tells us if we must immediately be able to allocate data_size
   returns a valid memory handle if all conditions for allocation are met.
           NULL if there memory_handle itself cannot be allocated or if the
           data_size cannot be allocated and alloc_all is set.  This
           function's only potential side effect is to allocate space for
           the cur_handle if it returns NULL. */
static struct memory_handle *add_handle(size_t data_size, bool can_wrap,
                                        bool alloc_all)
{
    /* gives each handle a unique id */
    static int cur_handle_id = 0;
    size_t shift;
    size_t new_widx;
    size_t len;
    int overlap;

    if (num_handles >= BUF_MAX_HANDLES)
        return NULL;

    mutex_lock(&llist_mutex);
    mutex_lock(&llist_mod_mutex);

    if (cur_handle && cur_handle->filerem > 0) {
        /* the current handle hasn't finished buffering. We can only add
           a new one if there is already enough free space to finish
           the buffering. */
        size_t req = cur_handle->filerem + sizeof(struct memory_handle);
        if (ringbuf_add_cross(cur_handle->widx, req, buf_ridx) >= 0) {
            /* Not enough space */
            mutex_unlock(&llist_mod_mutex);
            mutex_unlock(&llist_mutex);
            return NULL;
        } else {
            /* Allocate the remainder of the space for the current handle */
            buf_widx = ringbuf_add(cur_handle->widx, cur_handle->filerem);
        }
    }

    /* align to 4 bytes up */
    new_widx = ringbuf_add(buf_widx, 3) & ~3;

    len = data_size + sizeof(struct memory_handle);

    /* First, will the handle wrap? */
    /* If the handle would wrap, move to the beginning of the buffer,
     * or if the data must not but would wrap, move it to the beginning */
    if( (new_widx + sizeof(struct memory_handle) > buffer_len) ||
                   (!can_wrap && (new_widx + len > buffer_len)) ) {
        new_widx = 0;
    }

    /* How far we shifted buf_widx to align things, must be < buffer_len */
    shift = ringbuf_sub(new_widx, buf_widx);

    /* How much space are we short in the actual ring buffer? */
    overlap = ringbuf_add_cross(buf_widx, shift + len, buf_ridx);
    if (overlap >= 0 && (alloc_all || (unsigned)overlap > data_size)) {
        /* Not enough space for required allocations */
        mutex_unlock(&llist_mod_mutex);
        mutex_unlock(&llist_mutex);
        return NULL;
    }

    /* There is enough space for the required data, advance the buf_widx and
     * initialize the struct */
    buf_widx = new_widx;

    struct memory_handle *new_handle =
        (struct memory_handle *)(&buffer[buf_widx]);

    /* only advance the buffer write index of the size of the struct */
    buf_widx = ringbuf_add(buf_widx, sizeof(struct memory_handle));

    new_handle->id = cur_handle_id;
    /* Wrap signed int is safe and 0 doesn't happen */
    cur_handle_id = (cur_handle_id + 1) & BUF_HANDLE_MASK;
    new_handle->next = NULL;
    num_handles++;

    if (!first_handle)
        /* the new handle is the first one */
        first_handle = new_handle;

    if (cur_handle)
        cur_handle->next = new_handle;

    cur_handle = new_handle;

    mutex_unlock(&llist_mod_mutex);
    mutex_unlock(&llist_mutex);
    return new_handle;
}
//
// addsamples needs to maintain the same function signature as addbuffered
//
// Push PCM samples into a mixer source's ring buffer, activating the source
// on first use and clipping samples whose timestamps are already in the past.
// Returns the number of samples accepted (0 if numSamples is 0).
// When `lock` is non-zero the source buffer mutex is taken for the duration.
// NOTE(review): `channels`, `vad` and `pvad` are unused here — presumably
// kept only to match addbuffered's signature; confirm with callers.
int addsamples(MIXER_SOURCE_T *pSource, const int16_t *pSamples,
               unsigned int numSamples, unsigned int channels,
               u_int64_t tsHz, int vad, int *pvad, int lock) {
  int rc = 0;
  unsigned int idx = 0;    // count of leading input samples to skip
  int was_set;
  int overwrote = 0;

  if(numSamples <= 0) {
    return 0;
  }

#if defined(DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
  if(pSource->id==1)
  LOG(X_DEBUG("---mixer addsamples sourceid:%d numSamples:%d (in src buffer:%d) tsHz:%lldHz (clockoffset:%lldHz, discardoffset:%lldHz) (%.3fs), buf.tsHz:%llu (haveTsHz:%d, active:%d)"),
      pSource->id, numSamples, pSource->buf.numSamples, tsHz,
      pSource->clockOffset, pSource->discardedSamplesOffset,
      (double)(tsHz + pSource->clockOffset)/pSource->clockHz,
      pSource->buf.tsHz, pSource->buf.haveTsHz, pSource->active);
#endif // (DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)

  if(lock) {
    pthread_mutex_lock(&pSource->buf.mtx);
  }

  if(!pSource->active) {
    was_set = pSource->buf.haveTsHz;
    // pSource->buf.haveTsHz will be set to 0 here
    source_reset(pSource, !lock);
    pSource->active = 1;
    if(was_set) {
      LOG(X_DEBUG("mixer setting sourceid:%d active tsHz:%lluHz)"),
          pSource->id, !pSource->buf.haveTsHz ? tsHz : pSource->buf.tsHz);
    }
#if defined(DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
    LOG(X_DEBUG("source_reset done, haveTsHz:%d buf.tsHz:%lluHz, tsHz:%lluHz, source.numS:%d"),
        pSource->buf.haveTsHz, pSource->buf.tsHz, tsHz,
        pSource->buf.numSamples);
    mixer_dumpLog(S_DEBUG, pSource->pMixer, !lock, "src reset");
#endif // (DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
  }

  if(!pSource->buf.haveTsHz) {
    //
    // Set tsHz as the time of the first stored sample in the buffer
    //
    pSource->buf.tsHz = tsHz;
    pSource->buf.haveTsHz = 1;
  } else if(tsHz < pSource->buf.tsHz) {
    //
    // We likely previously skipped these input samples as they were late
    // arriving and have already been replaced with null noise
    //
    idx += (pSource->buf.tsHz - tsHz);
    LOG(X_WARNING("mixer_addsamples sourceid:%d clipping first %d (max:%d) samples %lld < %lld"),
        pSource->id, idx, numSamples, tsHz, pSource->buf.tsHz);
#if defined(DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
    mixer_dumpLog(S_DEBUG, pSource->pMixer, !lock, "addsamples clipping");
#endif // (DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
  }

  //
  // Copy the input samples into the ring buffer
  //
  if(numSamples > idx) {
    rc = numSamples - idx;

    // A positive return from ringbuf_add means unread samples were
    // overwritten: the source fell behind, so deactivate it and request
    // an input reset.
    if((overwrote = ringbuf_add(&pSource->buf, &pSamples[idx],
                                numSamples - idx)) > 0) {
      pSource->buf.tsHz += overwrote;
      LOG(X_WARNING("mixer_addsamples sourceid:%d overwrote %d, rdidx:%d, wridx:%d / %d. Will reset source."),
          pSource->id, overwrote, pSource->buf.samplesRdIdx,
          pSource->buf.samplesWrIdx, pSource->buf.samplesSz);
      pSource->active = 0;
      pSource->needInputReset = 1;
    }

#if defined(DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
    if(pSource->id==1)
    LOG(X_DEBUG("---mixer ringbuf_add was called with %d - %d samples. buf.tsHz:%lluHz (tsHz:%lluHz), buf.numSamples:%d"),
        numSamples, idx, pSource->buf.tsHz, tsHz, pSource->buf.numSamples);
#endif // (DEBUG_MIXER_TIMING) && (DEBUG_MIXER_TIMING > 0)
  }

  if(lock) {
    pthread_mutex_unlock(&pSource->buf.mtx);
  }

  return rc;
}