// Runs in the cache thread static void cache_execute_control(struct priv *s) { uint64_t old_pos = stream_tell(s->stream); s->control_flush = false; switch (s->control) { case STREAM_CTRL_SET_CACHE_SIZE: s->control_res = resize_cache(s, *(int64_t *)s->control_arg); break; default: s->control_res = stream_control(s->stream, s->control, s->control_arg); } bool pos_changed = old_pos != stream_tell(s->stream); bool ok = s->control_res == STREAM_OK; if (pos_changed && !ok) { MP_ERR(s, "STREAM_CTRL changed stream pos but " "returned error, this is not allowed!\n"); } else if (pos_changed || (ok && control_needs_flush(s->control))) { MP_VERBOSE(s, "Dropping cache due to control()\n"); s->read_filepos = stream_tell(s->stream); s->control_flush = true; cache_drop_contents(s); } update_cached_controls(s); s->control = CACHE_CTRL_NONE; pthread_cond_signal(&s->wakeup); }
// This is called both during init and at runtime. static int resize_cache(struct priv *s, int64_t size) { int64_t min_size = FILL_LIMIT * 4; int64_t max_size = ((size_t)-1) / 4; int64_t buffer_size = MPMIN(MPMAX(size, min_size), max_size); unsigned char *buffer = malloc(buffer_size); struct byte_meta *bm = calloc(buffer_size / BYTE_META_CHUNK_SIZE + 2, sizeof(struct byte_meta)); if (!buffer || !bm) { free(buffer); free(bm); return STREAM_ERROR; } if (s->buffer) { // Copy & free the old ringbuffer data. // If the buffer is too small, prefer to copy these regions: // 1. Data starting from read_filepos, until cache end size_t read_1 = read_buffer(s, buffer, buffer_size, s->read_filepos); // 2. then data from before read_filepos until cache start // (this one needs to be copied to the end of the ringbuffer) size_t read_2 = 0; if (s->min_filepos < s->read_filepos) { size_t copy_len = buffer_size - read_1; copy_len = MPMIN(copy_len, s->read_filepos - s->min_filepos); assert(copy_len + read_1 <= buffer_size); read_2 = read_buffer(s, buffer + buffer_size - copy_len, copy_len, s->read_filepos - copy_len); // This shouldn't happen, unless copy_len was computed incorrectly. assert(read_2 == copy_len); } // Set it up such that read_1 is at buffer pos 0, and read_2 wraps // around below it, so that it is located at the end of the buffer. s->min_filepos = s->read_filepos - read_2; s->max_filepos = s->read_filepos + read_1; s->offset = s->max_filepos - read_1; } else { cache_drop_contents(s); } free(s->buffer); free(s->bm); s->buffer_size = buffer_size; s->back_size = buffer_size / 2; s->buffer = buffer; s->bm = bm; s->idle = false; s->eof = false; //make sure that we won't wait from cache_fill //more data than it is allowed to fill if (s->seek_limit > s->buffer_size - FILL_LIMIT) s->seek_limit = s->buffer_size - FILL_LIMIT; return STREAM_OK; }
// Runs in the cache thread static void cache_execute_control(struct priv *s) { uint64_t old_pos = stream_tell(s->stream); s->control_res = stream_control(s->stream, s->control, s->control_arg); s->control_flush = false; bool pos_changed = old_pos != stream_tell(s->stream); bool ok = s->control_res == STREAM_OK; if (pos_changed && !ok) { mp_msg(MSGT_STREAM, MSGL_ERR, "STREAM_CTRL changed stream pos but " "returned error, this is not allowed!\n"); } else if (pos_changed || (ok && control_needs_flush(s->control))) { mp_msg(MSGT_CACHE, MSGL_V, "Dropping cache due to control()\n"); s->read_filepos = stream_tell(s->stream); s->control_flush = true; cache_drop_contents(s); } s->control = CACHE_CTRL_NONE; pthread_cond_signal(&s->wakeup); }
// return 1 on success, 0 if the cache is disabled/not needed, and -1 on error // or if the cache is disabled int stream_cache_init(stream_t *cache, stream_t *stream, struct mp_cache_opts *opts) { if (opts->size < 1) return 0; struct priv *s = talloc_zero(NULL, struct priv); s->log = cache->log; s->eof_pos = -1; cache_drop_contents(s); s->seek_limit = opts->seek_min * 1024ULL; s->back_size = opts->back_buffer * 1024ULL; int64_t cache_size = opts->size * 1024ULL; int64_t file_size = stream_get_size(stream); if (file_size >= 0) cache_size = MPMIN(cache_size, file_size); if (resize_cache(s, cache_size) != STREAM_OK) { MP_ERR(s, "Failed to allocate cache buffer.\n"); talloc_free(s); return -1; } MP_VERBOSE(cache, "Cache size set to %lld KiB (%lld KiB backbuffer)\n", (long long)(s->buffer_size / 1024), (long long)(s->back_size / 1024)); pthread_mutex_init(&s->mutex, NULL); pthread_cond_init(&s->wakeup, NULL); cache->priv = s; s->cache = cache; s->stream = stream; cache->seek = cache_seek; cache->fill_buffer = cache_fill_buffer; cache->control = cache_control; cache->close = cache_uninit; int64_t min = opts->initial * 1024ULL; if (min > s->buffer_size - FILL_LIMIT) min = s->buffer_size - FILL_LIMIT; s->seekable = stream->seekable; if (pthread_create(&s->cache_thread, NULL, cache_thread, s) != 0) { MP_ERR(s, "Starting cache thread failed.\n"); return -1; } s->cache_thread_running = true; // wait until cache is filled with at least min bytes if (min < 1) return 1; for (;;) { if (mp_cancel_test(cache->cancel)) return -1; int64_t fill; int idle; if (stream_control(s->cache, STREAM_CTRL_GET_CACHE_FILL, &fill) < 0) break; if (stream_control(s->cache, STREAM_CTRL_GET_CACHE_IDLE, &idle) < 0) break; MP_INFO(s, "\rCache fill: %5.2f%% " "(%" PRId64 " bytes) ", 100.0 * fill / s->buffer_size, fill); if (fill >= min) break; if (idle) break; // file is smaller than prefill size // Wake up if the cache is done reading some data (or on timeout/abort) pthread_mutex_lock(&s->mutex); 
s->control = CACHE_CTRL_PING; pthread_cond_signal(&s->wakeup); cache_wakeup_and_wait(s, &(double){0}); pthread_mutex_unlock(&s->mutex); }
// Runs in the cache thread.
// Returns true if reading was attempted, and the mutex was shortly unlocked.
//
// One iteration of the cache thread's fill loop: reposition the underlying
// stream if needed, compute how much free ringbuffer space is available
// (reserving back_size bytes of backseek data), read one chunk into the
// buffer with the mutex dropped, and update the cached range / EOF state.
// Returns false (without reading) when there is no usable free space.
static bool cache_fill(struct priv *s)
{
    int64_t read = s->read_filepos;
    int len = 0;

    // drop cache contents only if seeking backward or too much fwd.
    // This is also done for on-disk files, since it loses the backseek cache.
    // That in turn can cause major bandwidth increase and performance
    // issues with e.g. mov or badly interleaved files
    if (read < s->min_filepos || read > s->max_filepos + s->seek_limit) {
        MP_VERBOSE(s, "Dropping cache at pos %"PRId64", "
                   "cached range: %"PRId64"-%"PRId64".\n", read,
                   s->min_filepos, s->max_filepos);
        cache_drop_contents(s);
    }

    // The fill always appends at max_filepos; seek the underlying stream
    // there if it drifted (e.g. after a control changed its position).
    if (stream_tell(s->stream) != s->max_filepos && s->seekable) {
        MP_VERBOSE(s, "Seeking underlying stream: %"PRId64" -> %"PRId64"\n",
                   stream_tell(s->stream), s->max_filepos);
        stream_seek(s->stream, s->max_filepos);
        if (stream_tell(s->stream) != s->max_filepos)
            goto done; // seek failed; len == 0 makes this look like EOF
    }

    if (mp_cancel_test(s->cache->cancel))
        goto done;

    // number of buffer bytes which should be preserved in backwards direction
    int64_t back = MPCLAMP(read - s->min_filepos, 0, s->back_size);

    // limit maximum readahead so that the backbuffer space is reserved, even
    // if the backbuffer is not used. limit it to ensure that we don't stall the
    // network when starting a file, or we wouldn't download new data until we
    // get new free space again. (unless everything fits in the cache.)
    if (s->stream_size > s->buffer_size)
        back = MPMAX(back, s->back_size);

    // number of buffer bytes that are valid and can be read
    int64_t newb = FFMAX(s->max_filepos - read, 0);

    // max. number of bytes that can be written (starting from max_filepos)
    int64_t space = s->buffer_size - (newb + back);

    // offset into the buffer that maps to max_filepos
    int64_t pos = s->max_filepos - s->offset;
    if (pos >= s->buffer_size)
        pos -= s->buffer_size; // wrap-around

    if (space < FILL_LIMIT) {
        s->idle = true;
        s->reads++; // don't stuck main thread
        return false;
    }

    // limit to end of buffer (without wrapping)
    if (pos + space >= s->buffer_size)
        space = s->buffer_size - pos;

    // limit read size (or else would block and read the entire buffer in 1 call)
    space = FFMIN(space, s->stream->read_chunk);

    // back+newb+space <= buffer_size
    int64_t back2 = s->buffer_size - (space + newb); // max back size
    if (s->min_filepos < (read - back2))
        s->min_filepos = read - back2; // pre-drop data the write will overrun

    // The read call might take a long time and block, so drop the lock.
    pthread_mutex_unlock(&s->mutex);
    len = stream_read_partial(s->stream, &s->buffer[pos], space);
    pthread_mutex_lock(&s->mutex);

    // Do this after reading a block, because at least libdvdnav updates the
    // stream position only after actually reading something after a seek.
    if (s->start_pts == MP_NOPTS_VALUE) {
        double pts;
        if (stream_control(s->stream, STREAM_CTRL_GET_CURRENT_TIME, &pts) > 0)
            s->start_pts = pts;
    }

    s->max_filepos += len;
    if (pos + len == s->buffer_size)
        s->offset += s->buffer_size; // wrap...

done:
    s->eof = len <= 0;
    s->idle = s->eof;
    s->reads++;
    if (s->eof) {
        s->eof_pos = stream_tell(s->stream);
        MP_TRACE(s, "EOF reached.\n");
    }

    pthread_cond_signal(&s->wakeup);

    return true;
}
// Runs in the cache thread.
// Returns true if reading was attempted, and the mutex was shortly unlocked.
//
// Older variant of the fill loop iteration: if the reader's position fell
// outside the cached range, optionally drop the cache and seek the stream;
// then compute free ringbuffer space, read one chunk with the mutex dropped,
// stamp per-chunk pts metadata, and update the cached range / EOF state.
// Returns false (without reading) when free space is below fill_limit.
static bool cache_fill(struct priv *s)
{
    int64_t read = s->read_filepos;
    int len;

    if (read < s->min_filepos || read > s->max_filepos) {
        // seek...
        mp_msg(MSGT_CACHE, MSGL_DBG2,
               "Out of boundaries... seeking to 0x%" PRIX64 " \n", read);
        // drop cache contents only if seeking backward or too much fwd.
        // This is also done for on-disk files, since it loses the backseek cache.
        // That in turn can cause major bandwidth increase and performance
        // issues with e.g. mov or badly interleaved files
        if (read < s->min_filepos || read >= s->max_filepos + s->seek_limit) {
            mp_msg(MSGT_CACHE, MSGL_V, "Dropping cache at pos %"PRId64", "
                   "cached range: %"PRId64"-%"PRId64".\n", read,
                   s->min_filepos, s->max_filepos);
            cache_drop_contents(s);
            stream_seek(s->stream, read);
        }
    }

    // number of buffer bytes which should be preserved in backwards direction
    int64_t back = mp_clipi64(read - s->min_filepos, 0, s->back_size);

    // number of buffer bytes that are valid and can be read
    int64_t newb = FFMAX(s->max_filepos - read, 0);

    // max. number of bytes that can be written (starting from max_filepos)
    int64_t space = s->buffer_size - (newb + back);

    // offset into the buffer that maps to max_filepos
    int pos = s->max_filepos - s->offset;
    if (pos >= s->buffer_size)
        pos -= s->buffer_size; // wrap-around

    if (space < s->fill_limit) {
        s->idle = true;
        return false;
    }

    // limit to end of buffer (without wrapping)
    if (pos + space >= s->buffer_size)
        space = s->buffer_size - pos;

    // limit read size (or else would block and read the entire buffer in 1 call)
    space = FFMIN(space, s->stream->read_chunk);

    // back+newb+space <= buffer_size
    int64_t back2 = s->buffer_size - (space + newb); // max back size
    if (s->min_filepos < (read - back2))
        s->min_filepos = read - back2;

    // The read call might take a long time and block, so drop the lock.
    pthread_mutex_unlock(&s->mutex);
    len = stream_read_partial(s->stream, &s->buffer[pos], space);
    pthread_mutex_lock(&s->mutex);

    // Tag every metadata chunk touched by this read with the stream's
    // current pts (MP_NOPTS_VALUE if the stream doesn't report one).
    double pts;
    if (stream_control(s->stream, STREAM_CTRL_GET_CURRENT_TIME, &pts) <= 0)
        pts = MP_NOPTS_VALUE;
    for (int64_t b_pos = pos; b_pos < pos + len + BYTE_META_CHUNK_SIZE;
         b_pos += BYTE_META_CHUNK_SIZE)
    {
        s->bm[b_pos / BYTE_META_CHUNK_SIZE] =
            (struct byte_meta){.stream_pts = pts};
    }

    s->max_filepos += len;
    if (pos + len == s->buffer_size)
        s->offset += s->buffer_size; // wrap...

    s->eof = len > 0 ? 0 : 1;
    s->idle = s->eof;

    pthread_cond_signal(&s->wakeup);

    return true;
}