/* Frees older cached buffers.  Called under table_lock */
void fd_cleanup_bo_cache(struct fd_device *dev, time_t time)
{
	int i;

	if (dev->time == time)
		return;

	for (i = 0; i < dev->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &dev->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	dev->time = time;
}
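/* For context, a minimal sketch of the cache structures the function
 * above assumes.  Field names are inferred from the usage above
 * (dev->num_buckets, dev->cache_bucket[], bucket->list, bo->free_time);
 * the real definitions in the freedreno headers may differ in layout
 * and array length:
 */
struct fd_bo_bucket {
	uint32_t size;          /* allocation size served by this bucket */
	struct list_head list;  /* free bo's, oldest at head, newest at tail */
};

struct fd_device {
	/* ... other fields ... */
	struct fd_bo_bucket cache_bucket[14 * 4];  /* array length assumed */
	int num_buckets;
	time_t time;            /* time of the last cleanup pass */
};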
static struct fd_bo *find_in_bucket(struct fd_device *dev,
		struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	while (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

		if (0 /* TODO: if madvise tells us bo is gone... */) {
			list_del(&bo->list);
			bo_del(bo);
			bo = NULL;
			continue;
		}

		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
			break;
		}

		bo = NULL;
		break;
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}
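/* The is_idle() check above presumably asks the kernel whether the GPU
 * is done with the bo, without blocking.  A plausible sketch using the
 * non-blocking NOSYNC prep flag; fd_bo_cpu_prep() and the
 * DRM_FREEDRENO_PREP_* flags are taken from the freedreno API and are
 * not shown in this excerpt:
 */
static int is_idle(struct fd_bo *bo)
{
	/* with NOSYNC, cpu_prep fails immediately instead of stalling
	 * if the GPU still has pending access to the buffer:
	 */
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}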
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse) {
		struct fd_bo_bucket *bucket = get_bucket(dev, bo->size);

		/* see if we can be green and recycle: */
		if (bucket) {
			struct timespec time;

			clock_gettime(CLOCK_MONOTONIC, &time);

			bo->free_time = time.tv_sec;
			list_addtail(&bo->list, &bucket->list);
			fd_cleanup_bo_cache(dev, time.tv_sec);

			/* bo's in the bucket cache don't have a ref and
			 * don't hold a ref to the dev:
			 */
			goto out;
		}
	}

	bo_del(bo);
out:
	fd_device_del_locked(dev);
	pthread_mutex_unlock(&table_lock);
}
/* Refactored version of fd_bo_del(): the inline recycling logic above
 * now lives behind the bo-cache API (fd_bo_cache_free()):
 */
void fd_bo_del(struct fd_bo *bo)
{
	struct fd_device *dev = bo->dev;

	if (!atomic_dec_and_test(&bo->refcnt))
		return;

	pthread_mutex_lock(&table_lock);

	if (bo->bo_reuse && (fd_bo_cache_free(&dev->bo_cache, bo) == 0))
		goto out;

	bo_del(bo);
	fd_device_del_locked(dev);
out:
	pthread_mutex_unlock(&table_lock);
}
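/* A sketch of what fd_bo_cache_free() would look like, reconstructed
 * from the inline recycling logic in the earlier fd_bo_del() above.
 * The struct fd_bo_cache type and the get_bucket()/fd_bo_cache_cleanup()
 * signatures are assumptions of this sketch.  Note the device unref
 * moves inside the helper, since the refactored caller skips
 * fd_device_del_locked() on the cached path.  Returns 0 if the bo was
 * placed in the cache, -1 if the caller should really free it:
 */
int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
	struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

	/* see if we can be green and recycle: */
	if (bucket) {
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		bo->free_time = time.tv_sec;
		list_addtail(&bo->list, &bucket->list);
		fd_bo_cache_cleanup(cache, time.tv_sec);

		/* bo's in the bucket cache don't have a ref and
		 * don't hold a ref to the dev:
		 */
		fd_device_del_locked(bo->dev);

		return 0;
	}

	return -1;
}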