/**
 * vc4_bo_cache_destroy() - Tear down the BO cache at driver unload.
 * @dev: DRM device owning the cache.
 *
 * Stops the cache's aging timer and waits for any in-flight aging work,
 * frees every BO still sitting in the cache, and complains loudly if any
 * BOs remain allocated afterwards (a leak at this point, since the driver
 * is going away).
 */
void vc4_bo_cache_destroy(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/*
	 * Stop the timer before synchronously canceling the work so the
	 * work item cannot be re-queued behind our back.
	 */
	del_timer(&vc4->bo_cache.time_timer);
	cancel_work_sync(&vc4->bo_cache.time_work);

	/* Release every cached BO back to CMA. */
	vc4_bo_cache_purge(dev);

	/* Anything still counted as allocated here has been leaked. */
	if (vc4->bo_stats.num_allocated) {
		DRM_ERROR("Destroying BO cache while BOs still allocated:\n");
		vc4_bo_stats_dump(vc4);
	}
}
/**
 * vc4_bo_create() - Allocate a V3D buffer object.
 * @dev: DRM device to allocate from.
 * @unaligned_size: Requested size in bytes; rounded up to PAGE_SIZE.
 * @allow_unzeroed: If true, a cache hit may be returned without clearing
 *                  its contents (callers that will overwrite it anyway).
 *
 * Tries to satisfy the request from the BO cache first; on a miss it
 * falls back to a fresh CMA allocation, purging the cache and retrying
 * once if CMA is exhausted.
 *
 * Return: the new BO on success, or an ERR_PTR() on failure
 * (-EINVAL for a zero-sized request, otherwise the error reported by
 * the CMA allocator).
 */
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
			     bool allow_unzeroed)
{
	size_t size = roundup(unaligned_size, PAGE_SIZE);
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_cma_object *cma_obj;
	struct vc4_bo *bo;

	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* First, try to get a vc4_bo from the kernel BO cache. */
	bo = vc4_bo_get_from_cache(dev, size);
	if (bo) {
		/* Cached BOs hold stale data; clear unless the caller opted out. */
		if (!allow_unzeroed)
			memset(bo->base.vaddr, 0, bo->base.base.size);
		return bo;
	}

	cma_obj = drm_gem_cma_create(dev, size);
	if (IS_ERR(cma_obj)) {
		/*
		 * If we've run out of CMA memory, kill the cache of
		 * CMA allocations we've got laying around and try again.
		 */
		vc4_bo_cache_purge(dev);

		cma_obj = drm_gem_cma_create(dev, size);
		if (IS_ERR(cma_obj)) {
			DRM_ERROR("Failed to allocate from CMA:\n");
			vc4_bo_stats_dump(vc4);
			/*
			 * Propagate the allocator's real error (e.g. -EINTR)
			 * instead of clobbering it with -ENOMEM.
			 */
			return ERR_CAST(cma_obj);
		}
	}

	return to_vc4_bo(&cma_obj->base);
}