static int submit_lookup_objects(struct etnaviv_gem_submit *submit, struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos, unsigned nr_bos) { struct drm_etnaviv_gem_submit_bo *bo; unsigned i; int ret = 0; spin_lock(&file->table_lock); for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) { struct drm_gem_object *obj; if (bo->flags & BO_INVALID_FLAGS) { DRM_ERROR("invalid flags: %x\n", bo->flags); ret = -EINVAL; goto out_unlock; } submit->bos[i].flags = bo->flags; /* normally use drm_gem_object_lookup(), but for bulk lookup * all under single table_lock just hit object_idr directly: */ obj = idr_find(&file->object_idr, bo->handle); if (!obj) { DRM_ERROR("invalid handle %u at index %u\n", bo->handle, i); ret = -EINVAL; goto out_unlock; } /* * Take a refcount on the object. The file table lock * prevents the object_idr's refcount on this being dropped. */ drm_gem_object_get(obj); submit->bos[i].obj = to_etnaviv_bo(obj); } out_unlock: submit->nr_bos = i; spin_unlock(&file->table_lock); return ret; }
/*
 * Import a dma-buf as a GEM object for @dev.
 *
 * Fast path: if the dma-buf was exported by amdgpu and belongs to this
 * very device, hand back the underlying GEM object directly with an
 * extra GEM reference (instead of holding the dma-buf's f_count).
 * Otherwise defer to the generic PRIME import helper.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct drm_gem_object *obj = dma_buf->priv;

		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, dma_buf);
}