/**
 * Poll the variable pointed to by var until it reaches zero.
 *
 * \param timeout  timeout in ns, or PIPE_TIMEOUT_INFINITE to wait forever;
 *                 0 only checks the current value without waiting.
 * \return true if the variable reached zero, false if the timeout expired.
 */
bool
os_wait_until_zero(volatile int *var, uint64_t timeout)
{
   if (!p_atomic_read(var))
      return true;

   if (!timeout)
      return false;

   if (timeout == PIPE_TIMEOUT_INFINITE) {
      while (p_atomic_read(var)) {
#if defined(PIPE_OS_UNIX)
         sched_yield();
#endif
      }
      return true;
   }
   else {
      int64_t start_time = os_time_get_nano();
      int64_t end_time = start_time + timeout;

      while (p_atomic_read(var)) {
         if (os_time_timeout(start_time, end_time, os_time_get_nano()))
            return false;

#if defined(PIPE_OS_UNIX)
         sched_yield();
#endif
      }
      return true;
   }
}
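/*
 * Hypothetical usage sketch (not from the original source): a fence-style
 * wait built on os_wait_until_zero(). The struct name, the pending field
 * and the wrapper are assumptions made for illustration only; the real
 * callers live in the gallium drivers.
 */
struct example_fence {
   volatile int pending; /* nonzero while work is outstanding, zero when done */
};

static bool
example_fence_wait(struct example_fence *fence, uint64_t timeout_ns)
{
   /* Returns true once the counter reaches zero, false on timeout. */
   return os_wait_until_zero(&fence->pending, timeout_ns);
}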
/**
 * Destroy resources at the head of the delayed-free list whose cache
 * timeout has expired.
 */
static void
virgl_cache_list_check_free(struct virgl_vtest_winsys *vtws)
{
   struct list_head *curr, *next;
   struct virgl_hw_res *res;
   int64_t now;

   now = os_time_get();
   curr = vtws->delayed.next;
   next = curr->next;
   while (curr != &vtws->delayed) {
      res = LIST_ENTRY(struct virgl_hw_res, curr, head);
      if (!os_time_timeout(res->start, res->end, now))
         break;

      LIST_DEL(&res->head);
      virgl_hw_res_destroy(vtws, res);
      curr = next;
      next = curr->next;
   }
}
/**
 * Free as many cache buffers from the list head as possible.
 */
static void
_pb_cache_buffer_list_check_free(struct pb_cache_manager *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_buffer *buf;
   int64_t now;

   now = os_time_get();

   curr = mgr->delayed.next;
   next = curr->next;
   while(curr != &mgr->delayed) {
      buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);

      if(!os_time_timeout(buf->start, buf->end, now))
         break;

      _pb_cache_buffer_destroy(buf);

      curr = next;
      next = curr->next;
   }
}
/**
 * Free as many cache buffers from the list head as possible.
 */
static void
release_expired_buffers_locked(struct list_head *cache)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;
   int64_t now;

   now = os_time_get();

   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = LIST_ENTRY(struct pb_cache_entry, curr, head);

      if (!os_time_timeout(entry->start, entry->end, now))
         break;

      destroy_buffer_locked(entry);

      curr = next;
      next = curr->next;
   }
}
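/*
 * Hypothetical sketch (not part of the original source): how an entry might
 * be timestamped when it is returned to the cache, so that the
 * os_time_timeout(entry->start, entry->end, now) checks above can expire it.
 * The helper name and the usecs parameter are assumptions for illustration.
 */
static void
example_cache_add_locked(struct list_head *cache, struct pb_cache_entry *entry,
                         unsigned usecs)
{
   entry->start = os_time_get();       /* time the buffer became idle */
   entry->end = entry->start + usecs;  /* time after which it may be freed */
   LIST_ADDTAIL(&entry->head, cache);  /* tail insertion keeps the list ordered by expiry */
}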
/**
 * Try to reuse a compatible resource from the delayed-free cache;
 * allocate a new one otherwise.
 */
static struct virgl_hw_res *
virgl_vtest_winsys_resource_cache_create(struct virgl_winsys *vws,
                                         enum pipe_texture_target target,
                                         uint32_t format,
                                         uint32_t bind,
                                         uint32_t width,
                                         uint32_t height,
                                         uint32_t depth,
                                         uint32_t array_size,
                                         uint32_t last_level,
                                         uint32_t nr_samples,
                                         uint32_t size)
{
   struct virgl_vtest_winsys *vtws = virgl_vtest_winsys(vws);
   struct virgl_hw_res *res, *curr_res;
   struct list_head *curr, *next;
   int64_t now;
   int ret = 0; /* initialized so the checks below are well-defined when the list is empty */

   /* only store binds for vertex/index/const buffers */
   if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
       bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
      goto alloc;

   pipe_mutex_lock(vtws->mutex);

   res = NULL;
   curr = vtws->delayed.next;
   next = curr->next;

   now = os_time_get();
   while (curr != &vtws->delayed) {
      curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);

      if (!res && ((ret = virgl_is_res_compat(vtws, curr_res, size, bind, format)) > 0))
         res = curr_res;
      else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
         LIST_DEL(&curr_res->head);
         virgl_hw_res_destroy(vtws, curr_res);
      } else
         break;

      if (ret == -1)
         break;

      curr = next;
      next = curr->next;
   }

   if (!res && ret != -1) {
      while (curr != &vtws->delayed) {
         curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
         ret = virgl_is_res_compat(vtws, curr_res, size, bind, format);
         if (ret > 0) {
            res = curr_res;
            break;
         }
         if (ret == -1)
            break;
         curr = next;
         next = curr->next;
      }
   }

   if (res) {
      LIST_DEL(&res->head);
      --vtws->num_delayed;
      pipe_mutex_unlock(vtws->mutex);
      pipe_reference_init(&res->reference, 1);
      return res;
   }

   pipe_mutex_unlock(vtws->mutex);

alloc:
   res = virgl_vtest_winsys_resource_create(vws, target, format, bind,
                                            width, height, depth, array_size,
                                            last_level, nr_samples, size);
   if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
       bind == VIRGL_BIND_VERTEX_BUFFER)
      res->cacheable = TRUE;
   return res;
}
static struct pb_buffer *
pb_cache_manager_create_buffer(struct pb_manager *_mgr,
                               pb_size size,
                               const struct pb_desc *desc)
{
   struct pb_cache_manager *mgr = pb_cache_manager(_mgr);
   struct pb_cache_buffer *buf;
   struct pb_cache_buffer *curr_buf;
   struct list_head *curr, *next;
   int64_t now;

   pipe_mutex_lock(mgr->mutex);

   buf = NULL;
   curr = mgr->delayed.next;
   next = curr->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while(curr != &mgr->delayed) {
      curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
      if(!buf && pb_cache_is_buffer_compat(curr_buf, size, desc))
         buf = curr_buf;
      else if(os_time_timeout(curr_buf->start, curr_buf->end, now))
         _pb_cache_buffer_destroy(curr_buf);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;
      curr = next;
      next = curr->next;
   }

   /* keep searching in the hot buffers */
   if(!buf) {
      while(curr != &mgr->delayed) {
         curr_buf = LIST_ENTRY(struct pb_cache_buffer, curr, head);
         if(pb_cache_is_buffer_compat(curr_buf, size, desc)) {
            buf = curr_buf;
            break;
         }
         /* no need to check the timeout here */
         curr = next;
         next = curr->next;
      }
   }

   if(buf) {
      LIST_DEL(&buf->head);
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->base.base.reference, 1);
      return &buf->base;
   }

   pipe_mutex_unlock(mgr->mutex);

   buf = CALLOC_STRUCT(pb_cache_buffer);
   if(!buf)
      return NULL;

   buf->buffer = mgr->provider->create_buffer(mgr->provider, size, desc);
   if(!buf->buffer) {
      FREE(buf);
      return NULL;
   }

   assert(pipe_is_referenced(&buf->buffer->base.reference));
   assert(pb_check_alignment(desc->alignment, buf->buffer->base.alignment));
   assert(pb_check_usage(desc->usage, buf->buffer->base.usage));
   assert(buf->buffer->base.size >= size);

   pipe_reference_init(&buf->base.base.reference, 1);
   buf->base.base.alignment = buf->buffer->base.alignment;
   buf->base.base.usage = buf->buffer->base.usage;
   buf->base.base.size = buf->buffer->base.size;

   buf->base.vtbl = &pb_cache_buffer_vtbl;
   buf->mgr = mgr;

   return &buf->base;
}
/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int64_t now;
   int ret = 0;
   struct list_head *cache = &mgr->buckets[bucket_index];

   pipe_mutex_lock(mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (cur != cache) {
      cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);

      if (!entry && ((ret = pb_cache_is_buffer_compat(cur_entry, size,
                                                      alignment, usage)) > 0))
         entry = cur_entry;
      else if (os_time_timeout(cur_entry->start, cur_entry->end, now))
         destroy_buffer_locked(cur_entry);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
         ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);
         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, return it */
   if (entry) {
      struct pb_buffer *buf = entry->buffer;

      mgr->cache_size -= buf->size;
      LIST_DEL(&entry->head);
      --mgr->num_buffers;
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   pipe_mutex_unlock(mgr->mutex);
   return NULL;
}
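/*
 * Hypothetical usage sketch (not from the original source): an allocation
 * path that tries to reclaim a cached buffer before creating a new one.
 * example_buffer_create(), example_alloc_fresh_buffer() and the fixed
 * bucket index 0 are assumptions for illustration only.
 */

/* hypothetical driver-specific allocator */
static struct pb_buffer *
example_alloc_fresh_buffer(pb_size size, unsigned alignment, unsigned usage);

static struct pb_buffer *
example_buffer_create(struct pb_cache *cache, pb_size size,
                      unsigned alignment, unsigned usage)
{
   struct pb_buffer *buf =
      pb_cache_reclaim_buffer(cache, size, alignment, usage, 0);

   if (buf)
      return buf;   /* reuse: refcount already reset to 1 by the cache */

   return example_alloc_fresh_buffer(size, alignment, usage);
}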