/**
 * Fence hook: forward the fence to the wrapped (underlying) buffer.
 */
static void
pb_cache_buffer_fence(struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence)
{
   struct pb_cache_buffer *cache_buf = pb_cache_buffer(_buf);

   pb_fence(cache_buf->buffer, fence);
}
static void * pb_cache_buffer_map(struct pb_buffer *_buf, unsigned flags, void *flush_ctx) { struct pb_cache_buffer *buf = pb_cache_buffer(_buf); return pb_map(buf->buffer, flags, flush_ctx); }
/**
 * Map hook: pass the mapping request straight through to the
 * underlying buffer.
 */
static void *
pb_cache_buffer_map(struct pb_buffer *_buf, enum pb_usage_flags flags,
                    void *flush_ctx)
{
   struct pb_cache_buffer *cache_buf = pb_cache_buffer(_buf);

   return pb_map(cache_buf->buffer, flags, flush_ctx);
}
static void pb_cache_buffer_destroy(struct pb_buffer *_buf) { struct pb_cache_buffer *buf = pb_cache_buffer(_buf); struct pb_cache_manager *mgr = buf->mgr; pipe_mutex_lock(mgr->mutex); assert(!pipe_is_referenced(&buf->base.reference)); _pb_cache_buffer_list_check_free(mgr); /* Directly release any buffer that exceeds the limit. */ if (mgr->cache_size + buf->base.size > mgr->max_cache_size) { pb_reference(&buf->buffer, NULL); FREE(buf); pipe_mutex_unlock(mgr->mutex); return; } buf->start = os_time_get(); buf->end = buf->start + mgr->usecs; LIST_ADDTAIL(&buf->head, &mgr->delayed); ++mgr->numDelayed; mgr->cache_size += buf->base.size; pipe_mutex_unlock(mgr->mutex); }
void pb_cache_manager_remove_buffer(struct pb_buffer *pb_buf) { struct pb_cache_buffer *buf = pb_cache_buffer(pb_buf); /* the buffer won't be added if mgr is NULL */ buf->mgr = NULL; }
/**
 * get_base_buffer hook: resolve through the wrapped buffer.
 */
static void
pb_cache_buffer_get_base_buffer(struct pb_buffer *_buf,
                                struct pb_buffer **base_buf,
                                pb_size *offset)
{
   /* The cache layer adds no offset of its own. */
   pb_get_base_buffer(pb_cache_buffer(_buf)->buffer, base_buf, offset);
}
/**
 * Validate hook: validation applies to the underlying buffer.
 */
static enum pipe_error
pb_cache_buffer_validate(struct pb_buffer *_buf,
                         struct pb_validate *vl,
                         unsigned flags)
{
   struct pb_cache_buffer *cache_buf = pb_cache_buffer(_buf);

   return pb_validate(cache_buf->buffer, vl, flags);
}
/**
 * Actually destroy the buffer: drop the reference on the wrapped
 * buffer and free the cache wrapper itself.  Caller must ensure no
 * outstanding references remain.
 */
static void
_pb_cache_buffer_destroy(struct pb_buffer *pb_buf)
{
   struct pb_cache_buffer *cache_buf = pb_cache_buffer(pb_buf);

   assert(!pipe_is_referenced(&cache_buf->base.reference));
   pb_reference(&cache_buf->buffer, NULL);
   FREE(cache_buf);
}
/**
 * Destroy hook: hand the buffer back to the cache for possible reuse,
 * or release it immediately if it was detached from its manager
 * (see pb_cache_manager_remove_buffer).
 */
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);

   if (buf->mgr) {
      /* Let the cache decide when to actually free it. */
      pb_cache_add_buffer(&buf->cache_entry);
      return;
   }

   /* Detached buffers bypass the cache entirely. */
   pb_reference(&buf->buffer, NULL);
   FREE(buf);
}
/**
 * Destroy hook: park the buffer on the manager's delayed list with a
 * reuse time window instead of freeing it right away.
 */
static void
pb_cache_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_cache_buffer *buf = pb_cache_buffer(_buf);
   struct pb_cache_manager *mgr = buf->mgr;

   pipe_mutex_lock(mgr->mutex);
   /* NOTE(review): double ".base" presumably matches an older pb_buffer
    * layout that embedded another base struct — confirm against the
    * struct definition. */
   assert(!pipe_is_referenced(&buf->base.base.reference));

   /* First expire cached buffers whose grace period has elapsed. */
   _pb_cache_buffer_list_check_free(mgr);

   /* Record the interval during which this buffer may be recycled. */
   buf->start = os_time_get();
   buf->end = buf->start + mgr->usecs;
   LIST_ADDTAIL(&buf->head, &mgr->delayed);
   ++mgr->numDelayed;

   pipe_mutex_unlock(mgr->mutex);
}
static bool pb_cache_can_reclaim_buffer(struct pb_buffer *_buf) { struct pb_cache_buffer *buf = pb_cache_buffer(_buf); if (buf->mgr->provider->is_buffer_busy) { if (buf->mgr->provider->is_buffer_busy(buf->mgr->provider, buf->buffer)) return false; } else { void *ptr = pb_map(buf->buffer, PB_USAGE_DONTBLOCK, NULL); if (!ptr) return false; pb_unmap(buf->buffer); } return true; }
static void pb_cache_buffer_unmap(struct pb_buffer *_buf) { struct pb_cache_buffer *buf = pb_cache_buffer(_buf); pb_unmap(buf->buffer); }