Example #1
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
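For context, this is how userspace typically consumes the offset a dumb-map handler like the one above produces; a minimal sketch, assuming drm_fd, handle, and size come from an earlier DRM_IOCTL_MODE_CREATE_DUMB call:

/* Sketch (not from any driver above): map a dumb BO through the DRM fd. */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *map_dumb_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	/* The driver callback (vgem_gem_dumb_map here) fills map.offset
	 * with drm_vma_node_offset_addr(&obj->vma_node). */
	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* The mapping is made on the DRM fd itself, not on the BO. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}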
Example #2
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
    struct drm_gem_flink *args = data;
    struct drm_gem_object *obj;
    int ret;

    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    obj = drm_gem_object_lookup(dev, file_priv, args->handle);
    if (obj == NULL)
        return -ENOENT;

    ret = drm_gem_name_create(&dev->object_names, obj, &obj->name);
    if (ret != 0) {
        if (ret == -EALREADY)
            ret = 0;
        drm_gem_object_unreference_unlocked(obj);
    }
    if (ret == 0)
        args->name = obj->name;
    return ret;
}
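The flink path above is exercised from userspace roughly as follows; a minimal sketch assuming both fds reference the same device, using the drm_gem_flink and drm_gem_open structures from the DRM uapi:

/* Sketch: export a handle to a global name, then import it elsewhere. */
#include <stdint.h>
#include <xf86drm.h>

static int export_name(int fd, uint32_t handle, uint32_t *name)
{
	struct drm_gem_flink args = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &args))
		return -1;
	*name = args.name;	/* filled from obj->name by the handler */
	return 0;
}

static int import_name(int fd, uint32_t name, uint32_t *handle)
{
	struct drm_gem_open args = { .name = name };

	if (drmIoctl(fd, DRM_IOCTL_GEM_OPEN, &args))
		return -1;
	*handle = args.handle;
	return 0;
}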
Example #3
static struct drm_framebuffer *
cirrus_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *filp,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct cirrus_device *cdev = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	u32 bpp;
	int ret;

	bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;

	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
				      bpp, mode_cmd->pitches[0]))
		return ERR_PTR(-EINVAL);

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb) {
		drm_gem_object_put_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_put_unlocked(obj);
		kfree(fb);
		return ERR_PTR(ret);
	}
	return fb;
}
Example #4
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;
	uint32_t cur_placement;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;
	r = radeon_object_busy_domain(robj, &cur_placement);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}
Example #5
/* the dumb interface doesn't work with the GEM straight MMAP
   interface, it expects to do MMAP on the drm fd, like normal */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #6
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -EINVAL;
	}
	robj = gobj->driver_private;

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;
}
Example #7
static int MMapGEM(struct file* pFile, struct vm_area_struct* ps_vma)
{
	extern struct drm_device *gpsPVRDRMDev;
	uint32_t uHandle = (uint32_t)ps_vma->vm_pgoff;
	struct drm_file *pDRMFile = PVR_DRM_FILE_FROM_FILE(pFile);
	struct drm_gem_object *pObj = drm_gem_object_lookup(gpsPVRDRMDev, pDRMFile, uHandle); 
	int ret;

	if (pObj == NULL)
	{
		return -ENOENT;
	}

	if (ps_vma->vm_file != NULL)
	{
		fput(ps_vma->vm_file);

		ps_vma->vm_file = pObj->filp;
		get_file(pObj->filp);
	}

	ps_vma->vm_pgoff = 0;

	ret = pObj->filp->f_op->mmap(pObj->filp, ps_vma);

	drm_gem_object_unreference_unlocked(pObj);

	return ret;
}
Example #8
static struct drm_framebuffer *
hibmc_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct hibmc_framebuffer *hibmc_fb;

	DRM_DEBUG_DRIVER("%dx%d, format %c%c%c%c\n",
			 mode_cmd->width, mode_cmd->height,
			 (mode_cmd->pixel_format) & 0xff,
			 (mode_cmd->pixel_format >> 8)  & 0xff,
			 (mode_cmd->pixel_format >> 16) & 0xff,
			 (mode_cmd->pixel_format >> 24) & 0xff);

	obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	hibmc_fb = hibmc_framebuffer_init(dev, mode_cmd, obj);
	if (IS_ERR(hibmc_fb)) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_CAST(hibmc_fb);
	}
	return &hibmc_fb->fb;
}
Example #9
File: gem.c Project: monojo/xu3
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	mutex_lock(&drm->struct_mutex);

	gem = drm_gem_object_lookup(drm, file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		mutex_unlock(&drm->struct_mutex);
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = tegra_bo_get_mmap_offset(bo);

	drm_gem_object_unreference(gem);

	mutex_unlock(&drm->struct_mutex);

	return 0;
}
Example #10
static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_omap_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		return -ENOENT;
	}

	/* XXX flushy, flushy */
	ret = 0;

	if (!ret) {
		ret = omap_gem_op_finish(obj, args->op);
	}

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #11
File: gem.c Project: 03199618/linux
/**
 *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
 *	@file: our drm client file
 *	@dev: drm device
 *	@handle: GEM handle to the object (from dumb_create)
 *
 *	Do the necessary setup to allow the mapping of the frame buffer
 *	into user memory. We don't have to do much here at the moment.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	int ret = 0;
	struct drm_gem_object *obj;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #12
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
Example #13
/* the dumb interface doesn't work with the GEM straight MMAP
   interface, it expects to do MMAP on the drm fd, like normal */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #14
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	sx_slock(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		sx_sunlock(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	sx_sunlock(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
Example #15
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	int i;
	int ret;

	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
		if (!obj) {
			DRM_ERROR("failed to lookup gem object\n");
			ret = -ENOENT;
			goto err;
		}

		exynos_gem[i] = to_exynos_gem(obj);
	}

	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err;
	}

	return fb;

err:
	while (i--)
		drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);

	return ERR_PTR(ret);
}
Example #16
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *bos[4] = {0};
	struct drm_framebuffer *fb;
	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);

	for (i = 0; i < n; i++) {
		bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
		if (!bos[i]) {
			ret = -ENXIO;
			goto out_unref;
		}
	}

	fb = msm_framebuffer_init(dev, mode_cmd, bos);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto out_unref;
	}

	return fb;

out_unref:
	for (i = 0; i < n; i++)
		drm_gem_object_unreference_unlocked(bos[i]);
	return ERR_PTR(ret);
}
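Framebuffer-create handlers like the ones above are reached through the ADDFB2 ioctl; a minimal userspace sketch using libdrm's drmModeAddFB2, assuming handle and pitch come from an earlier buffer allocation:

/* Sketch: register a single-plane XRGB8888 framebuffer. The kernel side
 * looks up each handles[] entry with drm_gem_object_lookup(), as above. */
#include <stdint.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

static int add_xrgb8888_fb(int fd, uint32_t width, uint32_t height,
			   uint32_t handle, uint32_t pitch, uint32_t *fb_id)
{
	uint32_t handles[4] = { handle };
	uint32_t pitches[4] = { pitch };
	uint32_t offsets[4] = { 0 };

	return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}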
Example #17
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
		   struct drm_file *file,
		   const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct udl_framebuffer *ufb;
	int ret;
	uint32_t size;

	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	size = mode_cmd->pitches[0] * mode_cmd->height;
	size = ALIGN(size, PAGE_SIZE);

	if (size > obj->size) {
		DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n",
			  size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
	if (ufb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		kfree(ufb);
		return ERR_PTR(ret);
	}
	return &ufb->base;
}
Example #18
int
ast_dumb_mmap_offset(struct drm_file *file,
		     struct drm_device *dev,
		     uint32_t handle,
		     uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;
	struct ast_bo *bo;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto out_unlock;
	}

	bo = gem_to_ast_bo(obj);
	*offset = ast_bo_mmap_offset(bo);

	drm_gem_object_unreference(obj);
	ret = 0;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;

}
Example #19
/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_i915_gem_set_tiling *args = data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EINVAL;
	obj_priv = obj->driver_private;

	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
		drm_gem_object_unreference(obj);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);

	if (args->tiling_mode == I915_TILING_NONE) {
		obj_priv->tiling_mode = I915_TILING_NONE;
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		}
	}
	if (args->tiling_mode != obj_priv->tiling_mode) {
		int ret;

		/* Unbind the object, as switching tiling means we're
		 * switching the cache organization due to fencing, probably.
		 */
		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			WARN(ret != -ERESTARTSYS,
			     "failed to unbind object for tiling switch");
			args->tiling_mode = obj_priv->tiling_mode;
			mutex_unlock(&dev->struct_mutex);
			drm_gem_object_unreference(obj);

			return ret;
		}
		obj_priv->tiling_mode = args->tiling_mode;
	}
	obj_priv->stride = args->stride;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
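The tiling ioctl above is driven from userspace roughly like this; a minimal sketch assuming an i915 fd and a valid GEM handle, with struct drm_i915_gem_set_tiling from the i915 uapi header:

/* Sketch: request X tiling and read back the swizzle mode the kernel chose
 * (the handler may demote the request to I915_TILING_NONE). */
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int set_x_tiling(int fd, uint32_t handle, uint32_t stride,
			uint32_t *swizzle)
{
	struct drm_i915_gem_set_tiling args = {
		.handle = handle,
		.tiling_mode = I915_TILING_X,
		.stride = stride,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &args))
		return -1;
	*swizzle = args.swizzle_mode;	/* filled by the handler above */
	return 0;
}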
Example #20
static struct drm_framebuffer *
ast_user_framebuffer_create(struct drm_device *dev,
	       struct drm_file *filp,
	       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct ast_framebuffer *ast_fb;
	int ret;

	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
	if (!ast_fb) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		kfree(ast_fb);
		return ERR_PTR(ret);
	}
	return &ast_fb->base;
}
Example #21
static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct timespec *timeout = &TS(args->timeout);
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #22
static struct drm_framebuffer *
virtio_gpu_user_framebuffer_create(struct drm_device *dev,
				   struct drm_file *file_priv,
				   const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj = NULL;
	struct virtio_gpu_framebuffer *virtio_gpu_fb;
	int ret;

	/* lookup object associated with res handle */
	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL);
	if (virtio_gpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(virtio_gpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &virtio_gpu_fb->base;
}
Example #23
static struct drm_framebuffer *
cirrus_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *filp,
			       struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct cirrus_framebuffer *cirrus_fb;
	int ret;
	u32 bpp, depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
	/* cirrus can't handle > 24bpp framebuffers at all */
	if (bpp > 24)
		return ERR_PTR(-EINVAL);

	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
	if (obj == NULL)
		return ERR_PTR(-ENOENT);

	cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
	if (!cirrus_fb) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
	if (ret) {
		drm_gem_object_unreference_unlocked(obj);
		kfree(cirrus_fb);
		return ERR_PTR(ret);
	}
	return &cirrus_fb->base;
}
Example #24
int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
				 struct drm_device *dev, uint32_t handle,
				 uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
Example #25
/**
 *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
 *	@file: our drm client file
 *	@dev: drm device
 *	@handle: GEM handle to the object (from dumb_create)
 *
 *	Do the necessary setup to allow the mapping of the frame buffer
 *	into user memory. We don't have to do much here at the moment.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	int ret = 0;
	struct drm_gem_object *obj;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable */
	if (!obj->map_list.map) {
		ret = gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}
	/* GEM should really work out the hash offsets for us */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
Example #26
static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x, int32_t hot_y)
{
	struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
	struct virtio_gpu_output *output =
		container_of(crtc, struct virtio_gpu_output, crtc);
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence = NULL;
	int ret = 0;

	if (handle == 0) {
		virtio_gpu_hide_cursor(vgdev, output);
		return 0;
	}

	/* lookup the cursor */
	gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (!qobj->hw_res_handle) {
		ret = -EINVAL;
		goto out;
	}

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
					   cpu_to_le32(64),
					   cpu_to_le32(64),
					   0, 0, &fence);
	ret = virtio_gpu_object_reserve(qobj, false);
	if (!ret) {
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
		virtio_gpu_object_unreserve(qobj);
		virtio_gpu_object_wait(qobj, false);
	}

	output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
	output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
	output->cursor.hot_x = cpu_to_le32(hot_x);
	output->cursor.hot_y = cpu_to_le32(hot_y);
	virtio_gpu_cursor_ping(vgdev, output);
	ret = 0;

out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}
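Cursor handlers like the one above are reached through the cursor ioctls; a minimal sketch using libdrm's drmModeSetCursor2, assuming the CRTC id and a 64x64 ARGB BO handle from earlier setup:

/* Sketch: show the cursor BO, then hide it by passing handle 0, which
 * takes the virtio_gpu_hide_cursor() path in the handler above. */
#include <stdint.h>
#include <xf86drmMode.h>

static int show_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle,
		       int32_t hot_x, int32_t hot_y)
{
	return drmModeSetCursor2(fd, crtc_id, bo_handle, 64, 64, hot_x, hot_y);
}

static int hide_cursor(int fd, uint32_t crtc_id)
{
	return drmModeSetCursor2(fd, crtc_id, 0, 64, 64, 0, 0);
}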
Example #27
int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each relocs use 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < p->nrelocs; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		}
	}
	return radeon_bo_list_validate(&p->validated);
}
Example #28
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint32_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		msm_gem_put_iova(old_bo, mdp4_kms->id);
		drm_gem_object_unreference_unlocked(old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_unreference_unlocked(cursor_bo);
	return ret;
}
Example #29
static struct drm_framebuffer *
rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
			const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
	struct drm_gem_object *obj;
	unsigned int hsub;
	unsigned int vsub;
	int num_planes;
	int ret;
	int i;

	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
	num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
			 ROCKCHIP_MAX_FB_BUFFER);

	for (i = 0; i < num_planes; i++) {
		unsigned int width = mode_cmd->width / (i ? hsub : 1);
		unsigned int height = mode_cmd->height / (i ? vsub : 1);
		unsigned int min_size;

		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
		if (!obj) {
			DRM_DEV_ERROR(dev->dev,
				      "Failed to lookup GEM object\n");
			ret = -ENXIO;
			goto err_gem_object_unreference;
		}

		min_size = (height - 1) * mode_cmd->pitches[i] +
			mode_cmd->offsets[i] +
			width * drm_format_plane_cpp(mode_cmd->pixel_format, i);

		if (obj->size < min_size) {
			drm_gem_object_put_unlocked(obj);
			ret = -EINVAL;
			goto err_gem_object_unreference;
		}
		objs[i] = obj;
	}

	fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
	if (IS_ERR(fb)) {
		ret = PTR_ERR(fb);
		goto err_gem_object_unreference;
	}

	return fb;

err_gem_object_unreference:
	for (i--; i >= 0; i--)
		drm_gem_object_put_unlocked(objs[i]);
	return ERR_PTR(ret);
}
Example #30
static struct drm_framebuffer *
exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct drm_framebuffer *fb;
	struct exynos_drm_fb *exynos_fb;
	int nr;
	int i;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		return ERR_PTR(-ENOENT);
	}

	drm_gem_object_unreference_unlocked(obj);

	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
	if (IS_ERR(fb))
		return fb;

	exynos_fb = to_exynos_fb(fb);
	nr = exynos_drm_format_num_buffers(fb->pixel_format);

	for (i = 1; i < nr; i++) {
		obj = drm_gem_object_lookup(dev, file_priv,
				mode_cmd->handles[i]);
		if (!obj) {
			DRM_ERROR("failed to lookup gem object\n");
			exynos_drm_fb_destroy(fb);
			return ERR_PTR(-ENOENT);
		}

		drm_gem_object_unreference_unlocked(obj);

		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
	}

	return fb;
}