Example #1
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with -EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
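Drivers typically install this helper directly as the mmap handler of their DRM file descriptor. A minimal sketch for a hypothetical "foo" driver (the fops layout follows the usual DRM pattern of this kernel era; the name foo_driver_fops is illustrative):

#include <drm/drmP.h>

/*
 * Hypothetical "foo" driver: drm_gem_mmap() is wired up as the mmap
 * handler of the DRM file descriptor, so every mmap() on the device
 * node goes through the fake-offset lookup above.
 */
static const struct file_operations foo_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
};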
Example #2
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	/* Let the GEM core check the size and set up the VMA. */
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	/*
	 * drm_gem_mmap_obj() stored the object in vma->vm_private_data,
	 * so this hands the same object to the msm-specific mmap setup.
	 */
	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
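For PRIME, this callback is plugged into the driver structure so that imported buffers can be mapped. A sketch of the relevant field (surrounding fields elided; treat the exact struct contents as illustrative rather than copied from the msm source):

static struct drm_driver msm_driver = {
	/* ... other callbacks elided ... */
	.gem_prime_mmap = msm_gem_prime_mmap,
};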
Example #3
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Generic GEM VMA setup first, then the driver-specific part
	 * that actually maps the backing memory.
	 */
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}
Example #4
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
		struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	int ret = 0;

	/*
	 * Use the omap-specific mmap size rather than obj->size: for
	 * tiled buffers the mappable size can differ from the object size.
	 */
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}
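The exporter advertises this handler through its dma_buf_ops, so that an importer, or a userspace mmap() on the dma-buf fd, lands here. A sketch with the surrounding ops elided (the struct name follows the omapdrm convention but is not verified against the source):

static struct dma_buf_ops omap_dmabuf_ops = {
	/* ... attach and map/unmap callbacks elided ... */
	.mmap = omap_gem_dmabuf_mmap,
};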
Example #5
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
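As the comment above says, CMA-based drivers hook this up as their ->gem_prime_mmap() callback. A minimal sketch for a hypothetical driver (foo_cma_driver is an illustrative name; drm_gem_cma_dumb_create is the matching CMA helper for dumb buffers):

static struct drm_driver foo_cma_driver = {
	/* ... other callbacks elided ... */
	.gem_prime_mmap	= drm_gem_cma_prime_mmap,
	.dumb_create	= drm_gem_cma_dumb_create,
};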
Example #6
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct drm_device *drm = obj->dev;
	int ret;

	/*
	 * On kernels of this vintage, drm_gem_mmap_obj() must be
	 * called with dev->struct_mutex held.
	 */
	mutex_lock(&drm->struct_mutex);
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&drm->struct_mutex);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}
Example #7
static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = info->par;
	struct tegra_bo *bo;
	int err;

	/* The fbdev framebuffer is backed by the first plane's GEM object. */
	bo = tegra_fb_get_plane(helper->fb, 0);

	err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(&bo->gem, vma);
}
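The fbdev emulation reaches this via the framebuffer device's fb_ops; .fb_mmap is the standard fbdev hook. A sketch of the registration (the other tegra fops contents are illustrative, not copied from the driver):

static struct fb_ops tegra_fb_ops = {
	.owner = THIS_MODULE,
	/* ... drm_fb_helper_* callbacks elided ... */
	.fb_mmap = tegra_fb_mmap,
};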
Example #8
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their DRM driver's ->gem_prime_mmap() callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_device *dev = obj->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
Example #9
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with -EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
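The "fake offset" the comment refers to is what userspace feeds back in. A minimal userspace sketch of the round trip for a dumb buffer, using libdrm (error handling elided; fd is an open DRM device node and handle/size come from an earlier DRM_IOCTL_MODE_CREATE_DUMB):

#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

void *map_dumb_buffer(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	/* Ask the kernel for the fake mmap offset of this object... */
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* ...and hand it back through mmap(); drm_gem_mmap() resolves it. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}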
Example #10
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
				struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = buffer->priv;
	struct drm_device *dev = obj->dev;
	int ret = 0;

	/* Only objects with shmem backing (obj->filp) can be mapped here. */
	if (WARN_ON(!obj->filp))
		return -EINVAL;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
	mutex_unlock(&dev->struct_mutex);
	if (ret < 0)
		return ret;

	return omap_gem_mmap_obj(obj, vma);
}
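With a handler like this in place, a process holding the exported dma-buf file descriptor can map the buffer directly. A sketch (the dma-buf fd would come from something like DRM_IOCTL_PRIME_HANDLE_TO_FD; size is assumed known to the caller):

#include <stddef.h>
#include <sys/mman.h>

void *map_exported_buffer(int dmabuf_fd, size_t size)
{
	/* mmap() on a dma-buf fd ends up in the exporter's .mmap op. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    dmabuf_fd, 0);
}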