Example #1
/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane:  display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;

	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}

	size = new_state->crtc_w * new_state->crtc_h * 4;

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size)
			return 0;

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	dev_priv = vmw_priv(crtc->dev);
	vmw_svga_enable(dev_priv);

	/* After we have allocated the backing store, we might not be able
	 * to resume the overlays; this is preferred to failing to allocate.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (ret != 0)
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
	else
		vps->dmabuf_size = size;

	return ret;
}
Example #2
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
Example #3
/**
 * vmw_sou_primary_plane_cleanup_fb - Free the SOU backing buffer
 *
 * @plane:  display plane
 * @old_state: Contains the FB to clean up
 *
 * Releases the backing buffer reference and unpins the display surface.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}
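For context, prepare_fb/cleanup_fb pairs like examples #1 and #3 above are normally installed through the DRM plane helper vtable. A minimal sketch, assuming the driver wires them up via drm_plane_helper_add(); the table below is illustrative and omits whatever other callbacks the real driver installs:

#include <drm/drm_plane_helper.h>

/* Illustrative vtable: only the hooks shown in these examples. */
static const struct drm_plane_helper_funcs vmw_sou_plane_helper_funcs_sketch = {
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

/* Hypothetical registration at plane-init time: */
/* drm_plane_helper_add(plane, &vmw_sou_plane_helper_funcs_sketch); */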
Example #4
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else {
		dev_priv->dummy_query_bo = vbo;
	}

	return ret;
}
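The allocate-then-init pattern above relies on vmw_dmabuf_init() invoking the supplied vmw_dmabuf_bo_free callback on failure, so the caller must never kfree() the wrapper itself once init has been attempted. A minimal sketch of a call site during device setup; the error label is an assumption, not the driver's exact code:

	/* Hypothetical call site: create the dummy query bo once during
	 * device initialization and unwind on failure.
	 */
	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;	/* assumed error label */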
Example #5
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous teardown steps should already have released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}
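The "early" in the name implies a matching late stage that runs after buffer management is gone. A hedged sketch of the unload ordering the doc comment describes; vmw_release_device_late() is an assumed counterpart, not code shown in these examples:

	/* Assumed unload ordering: the early takedown runs while TTM
	 * buffer management is still alive, the late counterpart after
	 * it has been torn down.
	 */
	vmw_release_device_early(dev_priv);
	/* ... tear down TTM buffer management here ... */
	vmw_release_device_late(dev_priv);	/* assumed counterpart */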
Example #6
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
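From userspace, this handler is reached through the vmwgfx CONTROL_STREAM ioctl. A minimal sketch using libdrm's drmCommandWrite(); only the fields the handler above reads on the disable path are set, and everything else is zeroed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Disable overlay stream `stream_id` on an open vmwgfx DRM fd. */
static int disable_overlay_stream(int fd, uint32_t stream_id)
{
	struct drm_vmw_control_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.stream_id = stream_id;
	arg.enabled = 0;	/* !enabled takes the vmw_overlay_stop() path */

	return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
}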
Example #7
/**
 * vmw_overlay_stop - Stop or pause a stream.
 *
 * @dev_priv: Pointer to device private struct.
 * @stream_id: Which stream to stop/pause.
 * @pause: True to pause, false to stop completely.
 * @interruptible: Whether waits inside the stop may be interrupted.
 *
 * If the stream is paused, the NO_EVICT flag is removed from the buffer,
 * but the buffer is left in VRAM. This allows, for instance, mode_set to
 * evict it should it need to.
 *
 * The caller must hold the overlay lock.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* No buffer attached: the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag, so -ENOMEM cannot happen */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}
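The stop/pause distinction above is what lets the rest of the driver quiesce all overlays temporarily, e.g. around a mode set. A hedged sketch of a pause-all helper in that spirit; the real driver's vmw_overlay_pause_all() is similar, but this loop is illustrative and VMW_MAX_NUM_STREAMS is assumed from the driver:

/* Illustrative pause-all loop: pause every stream so its buffer can
 * be evicted; vmw_overlay_resume_all() would undo this later.
 */
static int vmw_overlay_pause_all_sketch(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		/* pause == true, uninterruptible: see vmw_overlay_stop() */
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}
	mutex_unlock(&overlay->mutex);

	return 0;
}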
Example #8
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				vmw_surface_unreference(&surface);
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* take down the old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* set up the new image */
	if (surface) {
		/* vmw_user_surface_lookup_handle takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}
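Handlers like vmw_du_crtc_cursor_set() are installed through the CRTC function table. A minimal sketch; vmw_du_crtc_cursor_move() is an assumed companion and all other callbacks are omitted:

/* Illustrative vtable: only the cursor hooks are shown. */
static const struct drm_crtc_funcs vmw_crtc_funcs_sketch = {
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,	/* assumed companion */
};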
Example #9
/**
 * vmw_sou_backing_free - Free the backing store.
 *
 * @dev_priv: Pointer to device private struct.
 * @sou: Screen object unit whose backing buffer is released.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou)
{
	vmw_dmabuf_unreference(&sou->buffer);
	sou->buffer_size = 0;
}
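For symmetry, the free above implies an allocation path that sets sou->buffer and sou->buffer_size. A hedged sketch of what that counterpart could look like, reusing the kzalloc + vmw_dmabuf_init pattern from example #1; the function name and placement argument are assumptions:

/* Hypothetical allocation counterpart to vmw_sou_backing_free(). */
static int vmw_sou_backing_alloc_sketch(struct vmw_private *dev_priv,
					struct vmw_screen_object_unit *sou,
					unsigned long size)
{
	int ret;

	if (sou->buffer_size == size)
		return 0;

	if (sou->buffer)
		vmw_sou_backing_free(dev_priv, sou);

	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
	if (unlikely(!sou->buffer))
		return -ENOMEM;

	/* As in example #1, vmw_dmabuf_init() frees the wrapper on error. */
	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		sou->buffer = NULL;
	else
		sou->buffer_size = size;

	return ret;
}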