/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * Creates and initializes a GEM CMA object of the given size without
 * allocating any backing memory. Drivers that provide a gem_create_object()
 * hook get to allocate (and possibly embed) the object themselves; otherwise
 * a plain zeroed drm_gem_cma_object is used.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int err;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);

	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	err = drm_gem_object_init(drm, gem_obj, size);
	if (err)
		goto free_obj;

	err = drm_gem_create_mmap_offset(gem_obj);
	if (err) {
		drm_gem_object_release(gem_obj);
		goto free_obj;
	}

	return cma_obj;

free_obj:
	kfree(cma_obj);
	return ERR_PTR(err);
}
/*
 * The dumb interface does not use the straight GEM mmap path: userspace is
 * expected to mmap() the DRM fd at the fake offset returned here, as usual.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	struct udl_gem_object *bo;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}
	bo = to_udl_bo(obj);

	ret = udl_gem_get_pages(bo, GFP_KERNEL);
	if (ret)
		goto out;

	/* Allocate the fake mmap offset only on first use. */
	if (!bo->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)bo->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&bo->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * The dumb interface does not use the straight GEM mmap path: userspace is
 * expected to mmap() the DRM fd at the fake offset returned here, as usual.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	struct udl_gem_object *bo;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}
	bo = to_udl_bo(obj);

	ret = udl_gem_get_pages(bo);
	if (ret)
		goto out;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&bo->base.vma_node);

out:
	drm_gem_object_unreference(&bo->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * rockchip_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file_priv: drm file-private structure owning the handle
 * @dev: DRM device
 * @handle: GEM handle of the object
 * @offset: return location for the fake mmap offset
 *
 * Returns 0 on success or a negative error code on failure.
 */
int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
				 struct drm_device *dev, uint32_t handle,
				 uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Bug fix: propagate the result of drm_gem_create_mmap_offset()
	 * instead of unconditionally returning 0 — the old code reported
	 * success on failure, leaving *offset uninitialized for the caller.
	 */
	return ret;
}
/*
 * Allocate and initialize a bare tegra_bo (GEM object plus host1x state)
 * without any backing storage. Returns the new object or an ERR_PTR().
 */
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);

	ret = drm_gem_object_init(drm, &bo->gem, round_up(size, PAGE_SIZE));
	if (ret < 0)
		goto err_free;

	ret = drm_gem_create_mmap_offset(&bo->gem);
	if (ret < 0)
		goto err_release;

	return bo;

err_release:
	drm_gem_object_release(&bo->gem);
err_free:
	kfree(bo);
	return ERR_PTR(ret);
}
/* * __drm_gem_cma_create - Create a GEM CMA object without allocating memory * @drm: The drm device * @size: The GEM object size * * This function creates and initializes a GEM CMA object of the given size, but * doesn't allocate any memory to back the object. * * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure. */ static struct drm_gem_cma_object * __drm_gem_cma_create(struct drm_device *drm, unsigned int size) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *gem_obj; int ret; cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); if (!cma_obj) return ERR_PTR(-ENOMEM); gem_obj = &cma_obj->base; ret = drm_gem_object_init(drm, gem_obj, size); if (ret) goto error; ret = drm_gem_create_mmap_offset(gem_obj); if (ret) { drm_gem_object_release(gem_obj); goto error; } return cma_obj; error: kfree(cma_obj); return ERR_PTR(ret); }
/*
 * Look up a dumb buffer by handle and report the fake mmap offset that
 * userspace should pass to mmap() on the DRM fd.
 */
int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				struct drm_device *dev, uint32_t handle,
				uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable, allocating a fake offset only on first use */
	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	/* GEM should really work out the hash offsets for us */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Resolve a dumb-buffer handle to its fake mmap offset. Imported (non-shmem)
 * objects have no backing filp and cannot be mapped this way.
 */
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto put;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto put;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

put:
	drm_gem_object_put_unlocked(obj);
	return ret;
}
/**
 * psb_gem_dumb_map_gtt - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, unsigned long flags) { struct tegra_bo *bo; int err; bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); host1x_bo_init(&bo->base, &tegra_bo_ops); size = round_up(size, PAGE_SIZE); bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, "failed to allocate buffer with size %u\n", size); err = -ENOMEM; goto err_dma; } err = drm_gem_object_init(drm, &bo->gem, size); if (err) goto err_init; err = drm_gem_create_mmap_offset(&bo->gem); if (err) goto err_mmap; if (flags & DRM_TEGRA_GEM_CREATE_TILED) bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP) bo->flags |= TEGRA_BO_BOTTOM_UP; return bo; err_mmap: drm_gem_object_release(&bo->gem); err_init: tegra_bo_destroy(drm, bo); err_dma: kfree(bo); return ERR_PTR(err); }
/*
 * Return the fake mmap offset for @obj, allocating it if needed.
 * Must be called with struct_mutex held; returns 0 on allocation failure
 * (0 is used as the error sentinel here, never as a valid offset).
 */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int err;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	err = drm_gem_create_mmap_offset(obj);
	if (err) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
/* * drm_gem_cma_create - allocate an object with the given size * * returns a struct drm_gem_cma_object* on success or ERR_PTR values * on failure. */ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, unsigned int size) { struct drm_gem_cma_object *cma_obj; struct drm_gem_object *gem_obj; int ret; size = round_up(size, PAGE_SIZE); cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); if (!cma_obj) return ERR_PTR(-ENOMEM); cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); if (!cma_obj->vaddr) { dev_err(drm->dev, "failed to allocate buffer with size %d\n", size); ret = -ENOMEM; goto err_dma_alloc; } gem_obj = &cma_obj->base; ret = drm_gem_object_init(drm, gem_obj, size); if (ret) goto err_obj_init; ret = drm_gem_create_mmap_offset(gem_obj); if (ret) goto err_create_mmap_offset; return cma_obj; err_create_mmap_offset: drm_gem_object_release(gem_obj); err_obj_init: drm_gem_cma_buf_destroy(drm, cma_obj); err_dma_alloc: kfree(cma_obj); return ERR_PTR(ret); }
/*
 * Import a dma-buf as a tegra_bo.
 *
 * The buffer must map to a single physically-contiguous region (exactly one
 * scatterlist entry), since the returned bo exposes a single paddr. On
 * success the bo holds a reference on @buf and keeps the attachment mapped
 * for device DMA; both are released in the error unwind below.
 *
 * Returns the new bo or an ERR_PTR()-encoded error.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	ssize_t size;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(buf->size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free_mmap;
	}

	/* Hold a reference on the dma-buf for the lifetime of the bo. */
	get_dma_buf(buf);

	/*
	 * dma_buf_map_attachment() may report failure either as NULL or as
	 * an ERR_PTR depending on the exporter, so both cases are handled;
	 * the NULL check must come first so PTR_ERR is not applied to NULL.
	 */
	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (!bo->sgt) {
		err = -ENOMEM;
		goto detach;
	}

	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	/* Only contiguous buffers are supported: exactly one sg entry. */
	if (bo->sgt->nents > 1) {
		err = -EINVAL;
		goto detach;
	}

	bo->paddr = sg_dma_address(bo->sgt->sgl);
	bo->gem.import_attach = attach;

	return bo;

detach:
	/* sgt may be NULL or an ERR_PTR here; only unmap a real mapping. */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free_mmap:
	drm_gem_free_mmap_offset(&bo->gem);
release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}