Example #1
static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}
Example #2
static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}
Example #3
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}
Example #4
int vmw_prime_fd_to_handle(struct drm_device *dev,
			   struct drm_file *file_priv,
			   int fd, u32 *handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_prime_fd_to_handle(tfile, fd, handle);
}
Example #5
static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}
Example #6
int vmw_prime_handle_to_fd(struct drm_device *dev,
			   struct drm_file *file_priv,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
}
Example #7
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Taking the drm_global_mutex after the TTM lock might deadlock
	 */
	if (!(flags & DRM_UNLOCKED)) {
		DRM_ERROR("Refusing locked ioctl access.\n");
		return ERR_PTR(-EDEADLK);
	}

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}
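
The value returned by vmw_master_check() is consumed by the driver's common ioctl path: NULL means no lock was taken, an ERR_PTR aborts the call, and a valid pointer must be released with ttm_read_unlock() after the wrapped ioctl finishes. A simplified sketch of such a wrapper (illustrative, not the driver's exact code):

static long example_run_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long),
			      unsigned int flags)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	struct vmw_master *vmaster;
	long ret;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster))
		return PTR_ERR(vmaster);

	ret = ioctl_func(filp, cmd, arg);

	/* NULL means vmw_master_check() took no lock. */
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;
}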
Example #8
static void vmw_postclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
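
Examples #1, #2, #5 and #8, together with the PRIME helpers above, form the file and master lifecycle of the driver. A sketch of their registration in struct drm_driver, with hook names as in the DRM core of this era and all other fields elided:

static struct drm_driver driver = {
	/* ... driver_features, fops and ioctl table elided ... */
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,
};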
Example #9
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
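
Userspace reaches this function through the driver-private DRM_VMW_CONTROL_STREAM command. A minimal hedged sketch that stops a previously claimed stream (assumes libdrm and the vmwgfx_drm.h UAPI header; example_stop_stream is an illustrative name):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

int example_stop_stream(int drm_fd, uint32_t stream_id)
{
	struct drm_vmw_control_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.stream_id = stream_id;
	arg.enabled = 0;	/* takes the vmw_overlay_stop() path above */

	return drmCommandWrite(drm_fd, DRM_VMW_CONTROL_STREAM,
			       &arg, sizeof(arg));
}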
Example #10
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, as indicated by a non-NULL
 * storage pointer, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_tt as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The returned object
 * must not be freed by the caller.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
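
A sketch of how a caller could walk the returned table with the vmw_piter helpers used by vmw_ttm_map_dma() above, here counting contiguous DMA regions; example_count_regions is an illustrative name, and the buffer object is assumed to be reserved or pinned as the comment requires:

static unsigned int example_count_regions(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt = vmw_bo_sg_table(bo);
	struct vmw_piter iter;
	dma_addr_t old = ~((dma_addr_t) 0);
	unsigned int num_regions = 0;

	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		/* A discontiguous address starts a new region. */
		if (cur != old + PAGE_SIZE)
			num_regions++;
		old = cur;
	}

	return num_regions;
}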


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}


static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, ctx);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
					ctx);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm, ctx);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
					uint32_t page_flags)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 *  one slot per bo. There is an upper limit of the number of
		 *  slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_bo_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    bool evict,
			    struct ttm_mem_reg *mem)
{
	vmw_bo_move_notify(bo, mem);
	vmw_query_move_notify(bo, mem);
}
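
The callbacks above plug into TTM through a struct ttm_bo_driver instance. A sketch of that table as it would look for this driver generation (abridged; exact membership varies between kernel versions):

static struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.move_notify = vmw_move_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};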
Example #11
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* Drop the surface reference taken by vmw_user_resource_lookup_handle() */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
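
For reference, a hedged sketch of invoking this ioctl from userspace via libdrm (assumes the vmwgfx_drm.h UAPI header; example_present and the zero destination offset are illustrative):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

int example_present(int drm_fd, uint32_t fb_id, uint32_t sid,
		    const struct drm_vmw_rect *clips, uint32_t num_clips)
{
	struct drm_vmw_present_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.fb_id = fb_id;
	arg.sid = sid;		/* surface handle resolved by the kernel */
	arg.dest_x = 0;
	arg.dest_y = 0;
	arg.clips_ptr = (uintptr_t)clips;
	arg.num_clips = num_clips;

	return drmCommandWrite(drm_fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
}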
Example #12
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}
Example #13
/**
 * vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to
 * create a struct vmw_simple_resource.
 *
 * @dev: Pointer to a struct drm device.
 * @data: Ioctl argument.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @func: Pointer to a struct vmw_simple_resource_func identifying the
 * simple resource type.
 *
 * Returns:
 *   0 on success,
 *   Negative error value on error.
 */
int
vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv,
				 const struct vmw_simple_resource_func *func)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_simple_resource *usimple;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	size_t alloc_size;
	size_t account_size;
	int ret;

	alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
	  func->size;
	account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
		TTM_OBJ_EXTRA_SIZE;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (ret)
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
				   &ctx);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for %s"
				  " creation.\n", func->res_func.type_name);

		goto out_ret;
	}

	usimple = kzalloc(alloc_size, GFP_KERNEL);
	if (!usimple) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    account_size);
		ret = -ENOMEM;
		goto out_ret;
	}

	usimple->simple.func = func;
	usimple->account_size = account_size;
	res = &usimple->simple.res;
	usimple->base.shareable = false;
	usimple->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_simple_resource_init(dev_priv, &usimple->simple,
				       data, vmw_simple_resource_free);
	if (ret)
		goto out_ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &usimple->base, false,
				   func->ttm_res_type,
				   &vmw_simple_resource_base_release, NULL);

	if (ret) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	func->set_arg_handle(data, usimple->base.handle);
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}
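
A concrete resource type supplies a struct vmw_simple_resource_func and a thin ioctl wrapper around the helper above. A hypothetical sketch; vmw_thing_func and vmw_thing_define_ioctl are illustrative names, not part of the driver:

/* Hypothetical descriptor; a real one fills in size, res_func, etc. */
static const struct vmw_simple_resource_func vmw_thing_func;

static int vmw_thing_define_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	return vmw_simple_resource_create_ioctl(dev, data, file_priv,
						&vmw_thing_func);
}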

/**
 * vmw_simple_resource_lookup - Look up a simple resource from its user-space
 * handle.
 *
 * @tfile: struct ttm_object_file identifying the caller.
 * @handle: The user-space handle.
 * @func: The struct vmw_simple_resource_func identifying the simple resource
 * type.
 *
 * Returns: Refcounted pointer to the embedded struct vmw_resource if
 * successful. Error pointer otherwise.
 */
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
			   uint32_t handle,
			   const struct vmw_simple_resource_func *func)
{
	struct vmw_user_simple_resource *usimple;
	struct ttm_base_object *base;
	struct vmw_resource *res;

	base = ttm_base_object_lookup(tfile, handle);
	if (!base) {
		DRM_ERROR("Invalid %s handle 0x%08lx.\n",
			  func->res_func.type_name,
			  (unsigned long) handle);
		return ERR_PTR(-ESRCH);
	}

	if (ttm_base_object_type(base) != func->ttm_res_type) {
		ttm_base_object_unref(&base);
		DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
			  func->res_func.type_name,
			  (unsigned long) handle);
		return ERR_PTR(-EINVAL);
	}

	usimple = container_of(base, typeof(*usimple), base);
	res = vmw_resource_reference(&usimple->simple.res);
	ttm_base_object_unref(&base);

	return res;
}
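
And the matching lookup side, again hypothetical and reusing the illustrative vmw_thing_func from above; the caller owns one reference and must drop it with vmw_resource_unreference():

static int example_use_thing(struct ttm_object_file *tfile, uint32_t handle)
{
	struct vmw_resource *res;

	res = vmw_simple_resource_lookup(tfile, handle, &vmw_thing_func);
	if (IS_ERR(res))
		return PTR_ERR(res);

	/* ... operate on the refcounted resource ... */

	vmw_resource_unreference(&res);
	return 0;
}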
Example #14
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_mode_object *obj;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_mode_mutex;
	}

	obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
	if (!obj) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -EINVAL;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
					     &surface);
	if (ret)
		goto out_no_surface;

	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* Drop the surface reference taken by vmw_user_surface_lookup_handle() */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
out_no_fb:
	mutex_unlock(&dev->mode_config.mutex);
out_no_mode_mutex:
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}