Example no. 1
/**
 * vmw_dmabuf_pin_in_vram_or_gmr - Move and pin a buffer into vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto out_unreserve;

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);

out_unreserve:
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
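A minimal usage sketch for the helper above (the caller and its error policy are hypothetical; only the helper itself comes from the example):

/* Hypothetical caller: pin a buffer before handing it to the device.
 * -ERESTARTSYS propagates so the ioctl layer can restart the syscall. */
static int example_prepare_buffer(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf)
{
	int ret;

	ret = vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, true);
	if (unlikely(ret != 0))
		return ret;

	/* ... submit work that relies on the pinned buffer ... */
	return 0;
}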
Example no. 2
/**
 * vmw_dmabuf_pin_in_start_of_vram - Move and pin a buffer to the start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
				    struct vmw_dma_buffer *buf,
				    bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;
	uint32_t new_flags;

	place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;
	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * In that case, evict it first because TTM isn't good at handling
	 * that situation.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0 &&
	    buf->pin_count == 0) {
		ctx.interruptible = false;
		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
	}

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(&placement, &bo->mem,
					&new_flags) ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, &placement, &ctx);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);
	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
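The start-of-VRAM constraint above is carried entirely by the single ttm_place: fpfn stays 0 and lpfn is capped at the buffer's own page count, so the only range that can satisfy the placement is the first bo->num_pages pages of VRAM. A standalone sketch of that construction (same-era TTM structures assumed):

/* Sketch: a one-entry placement that only the start of VRAM satisfies. */
static void example_start_of_vram_placement(struct ttm_buffer_object *bo,
					    struct ttm_place *place,
					    struct ttm_placement *placement)
{
	*place = vmw_vram_placement.placement[0];
	place->fpfn = 0;		/* begin at page 0 of the aperture */
	place->lpfn = bo->num_pages;	/* end where the buffer would end */

	placement->num_placement = 1;
	placement->placement = place;
	placement->num_busy_placement = 1;
	placement->busy_placement = place;
}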
Example no. 3
/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;
	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
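The body above is a two-pass fallback: first try the permissive placement (VRAM or GMR), and only if that fails with something other than -ERESTARTSYS retry with the VRAM-only placement, which may evict other buffers. A condensed sketch of the pattern, using the same 4-argument ttm_bo_validate() as the example:

/* Sketch: try a preferred placement, fall back to a stricter one. */
static int example_validate_with_fallback(struct ttm_buffer_object *bo,
					  struct ttm_placement *preferred,
					  struct ttm_placement *fallback,
					  bool interruptible)
{
	int ret;

	ret = ttm_bo_validate(bo, preferred, interruptible, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		return ret;	/* placed, or interrupted by a signal */

	return ttm_bo_validate(bo, fallback, interruptible, false);
}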
Example no. 4
/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to validate the buffer to.
 * @interruptible:  Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
Example no. 5
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);

	if (pin) {
		if (vbo->pin_count++ > 0)
			return;
	} else {
		WARN_ON(vbo->pin_count <= 0);
		if (--vbo->pin_count > 0)
			return;
	}

	pl.fpfn = 0;
	pl.lpfn = 0;
	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl.flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl;

	ret = ttm_bo_validate(bo, &placement, false, true);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
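Since vmw_bo_pin_reserved() only toggles TTM_PL_FLAG_NO_EVICT and never moves the buffer, the reservation must be held across the call, as the lockdep assertion enforces. A hedged sketch of the expected calling pattern (hypothetical caller):

/* Sketch: pin a buffer in place; the reservation brackets the call. */
static int example_pin_in_place(struct vmw_dma_buffer *vbo, bool interruptible)
{
	struct ttm_buffer_object *bo = &vbo->base;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo_pin_reserved(vbo, true);	/* balanced later by a false call */
	ttm_bo_unreserve(bo);
	return 0;
}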
Example no. 6
/**
 * vmw_dmabuf_pin_in_placement - Validate and pin a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin the buffer in.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 *  -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct ttm_placement *placement,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	int ret;
	uint32_t new_flags;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (unlikely(ret != 0))
		goto err;

	if (buf->pin_count > 0)
		ret = ttm_bo_mem_compat(placement, &bo->mem,
					&new_flags) ? 0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, &ctx);

	if (!ret)
		vmw_bo_pin_reserved(buf, true);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&dev_priv->reservation_sem);
	return ret;
}
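The pin_count > 0 branch above deserves a note: a buffer that is already pinned may not move, so instead of validating, the helper only checks whether the current memory already satisfies the requested placement and fails with -EINVAL otherwise. A condensed sketch of that guard (same-era TTM assumed):

/* Sketch: validate when movable, merely verify when already pinned. */
static int example_validate_or_verify(struct ttm_buffer_object *bo,
				      struct ttm_placement *placement,
				      struct ttm_operation_ctx *ctx,
				      int pin_count)
{
	uint32_t new_flags;

	if (pin_count > 0)
		return ttm_bo_mem_compat(placement, &bo->mem, &new_flags) ?
			0 : -EINVAL;

	return ttm_bo_validate(bo, placement, ctx);
}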
Example no. 7
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_framebuffer_domains(adev);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}
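The positional initializer `{ true, false }` above fills the first two fields of struct ttm_operation_ctx; a designated-initializer form spells out what those positions mean (field names as in the 4.16-era struct):

	struct ttm_operation_ctx ctx = {
		.interruptible = true,	/* waits may be cut short by signals */
		.no_wait_gpu = false,	/* allow blocking until the GPU is idle */
	};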
Example no. 8
/**
 * vmw_dmabuf_pin_in_vram - Pin or unpin a buffer in vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin or unpin.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible,
			      false, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}
Example no. 9
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	struct ttm_place place;
	int ret = 0;

	if (pin)
		place = vmw_vram_ne_placement.placement[0];
	else
		place = vmw_vram_placement.placement[0];
	place.lpfn = bo->num_pages;

	placement.num_placement = 1;
	placement.placement = &place;
	placement.num_busy_placement = 1;
	placement.busy_placement = &place;

	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
Example no. 10
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
//   struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	int ret = 0;

	if (pin)
		placement = vmw_vram_ne_placement;
	else
		placement = vmw_vram_placement;
	placement.lpfn = bo->num_pages;

//   ret = ttm_write_lock(&vmaster->lock, interruptible);
//   if (unlikely(ret != 0))
//       return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Is this buffer already in vram but not at the start of it? */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
//   ttm_write_unlock(&vmaster->lock);

	return ret;
}
Example no. 11
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
    int ret;

    ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                          no_wait_reserve, no_wait_gpu);
    if (ret)
        return ret;

    if (nvbo->vma.node) {
        if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
            nvbo->bo.offset = nvbo->vma.offset;
    }

    return 0;
}
Example no. 12
int bochs_bo_unpin(struct bochs_bo *bo)
{
    int i, ret;

    if (!bo->pin_count) {
        DRM_ERROR("unpin bad %p\n", bo);
        return 0;
    }
    bo->pin_count--;

    if (bo->pin_count)
        return 0;

    for (i = 0; i < bo->placement.num_placement; i++)
        bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
    if (ret)
        return ret;

    return 0;
}
Example no. 13
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}

	return 0;
}
Example no. 14
/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
	uint32_t pl_flags;
	struct ttm_placement placement;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	lockdep_assert_held(&bo->resv->lock.base);

	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
	if (pin)
		pl_flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl_flags;

	ret = ttm_bo_validate(bo, &placement, false, true);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
Example no. 15
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
    int i, ret;

    if (bo->pin_count) {
        bo->pin_count++;
        if (gpu_addr)
            *gpu_addr = bochs_bo_gpu_offset(bo);
        return 0;
    }

    bochs_ttm_placement(bo, pl_flag);
    for (i = 0; i < bo->placement.num_placement; i++)
        bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
    if (ret)
        return ret;

    bo->pin_count = 1;
    if (gpu_addr)
        *gpu_addr = bochs_bo_gpu_offset(bo);
    return 0;
}
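Examples 12 and 15 form a pair: the first pin sets TTM_PL_FLAG_NO_EVICT on every placement and the last unpin clears it again once pin_count drops to zero. A hedged usage sketch pairing the two (hypothetical caller; reservation handling elided):

/* Sketch: balanced pin/unpin around programming a scanout address. */
static int example_set_scanout(struct bochs_bo *bo)
{
	u64 gpu_addr;
	int ret;

	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	if (ret)
		return ret;

	/* ... write gpu_addr to the display hardware ... */

	return bochs_bo_unpin(bo);
}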
Example no. 16
/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved, and present either in VRAM
 * or GMR memory.
 * @pin: Whether to pin or unpin.
 *
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
	uint32_t pl_flags;
	struct ttm_placement placement;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	BUG_ON(!atomic_read(&bo->reserved));
	BUG_ON(old_mem_type != TTM_PL_VRAM &&
	       old_mem_type != VMW_PL_GMR);

	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
	if (pin)
		pl_flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl_flags;

	ret = ttm_bo_validate(bo, &placement, false, true, true);

	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
Example no. 17
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_update_area *update_area = data;
	struct qxl_rect area = {.left = update_area->left,
				.top = update_area->top,
				.right = update_area->right,
				.bottom = update_area->bottom};
	int ret;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qobj = NULL;

	if (update_area->left >= update_area->right ||
	    update_area->top >= update_area->bottom)
		return -EINVAL;

	gobj = drm_gem_object_lookup(file, update_area->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_reserve(qobj, false);
	if (ret)
		goto out;

	if (!qobj->pin_count) {
		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
				      true, false);
		if (unlikely(ret))
			goto out;
	}

	ret = qxl_bo_check_id(qdev, qobj);
	if (ret)
		goto out2;
	if (!qobj->surface_id)
		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
	ret = qxl_io_update_area(qdev, qobj, &area);

out2:
	qxl_bo_unreserve(qobj);

out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_getparam *param = data;

	switch (param->param) {
	case QXL_PARAM_NUM_SURFACES:
		param->value = qdev->rom->n_surfaces;
		break;
	case QXL_PARAM_MAX_RELOCS:
		param->value = QXL_MAX_RES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;

	if (qdev->pdev->revision < 4)
		return -ENOSYS;

	if (byte >= 58)
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}

static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc_surf *param = data;
	struct qxl_bo *qobj;
	int handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out the size, then allocate a bo with a handle */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
	size = actual_stride * param->height + actual_stride;

	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;
	surf.data = 0;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	}
	param->handle = handle;
	return ret;
}

const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH),
};

int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
Example no. 18
int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_setstatus_arg *arg = data;
	struct ttm_pl_setstatus_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_placement placement = default_placement;
	uint32_t flags[2];
	int ret;

	bo = ttm_buffer_object_lookup(tfile, req->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR
		       "Could not find buffer object for setstatus.\n");
		return -EINVAL;
	}

	bdev = bo->bdev;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err1;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_wait_cpu(bo, false);
	if (unlikely(ret != 0))
		goto out_err2;
#endif

	flags[0] = req->set_placement;
	flags[1] = req->clr_placement;

	placement.num_placement = 2;
	placement.placement = flags;

	/* spin_lock(&bo->lock); */ /* not needed: we already hold the reservation */

	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		goto out_err2;

	placement.num_placement = 1;
	flags[0] = (req->set_placement | bo->mem.placement) & ~req->clr_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_validate(bo, &placement, true, false, false);
#else
	ret = ttm_bo_validate(bo, &placement, true, false);
#endif
	if (unlikely(ret != 0))
		goto out_err2;

	ttm_pl_fill_rep(bo, rep);
out_err2:
	/* spin_unlock(&bo->lock); */
	ttm_bo_unreserve(bo);
out_err1:
	ttm_read_unlock(lock);
out_err0:
	ttm_bo_unref(&bo);
	return ret;
}
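The single-flag placement built near the end reduces to a set/clear mask over the current placement: new = (set | old) & ~clear. A tiny worked example (flag values purely illustrative):

/* Sketch: how set/clr placement masks combine.
 * old = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
 * set = TTM_PL_FLAG_VRAM, clr = TTM_PL_FLAG_SYSTEM
 * new = (set | old) & ~clr = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 */
static uint32_t example_new_placement(uint32_t old, uint32_t set, uint32_t clr)
{
	return (set | old) & ~clr;
}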
Example no. 19
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
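A usage sketch for this era of ttm_bo_init() (hypothetical driver code, assuming the ttm_bo_acc_size() helper from the same kernel generation; note that on failure the function has already dropped the reference, so the caller must not free the object again):

/* Sketch: allocate and initialize a one-page buffer object. */
static int example_create_bo(struct ttm_bo_device *bdev,
			     struct ttm_placement *placement,
			     struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, PAGE_SIZE, sizeof(*bo));
	ret = ttm_bo_init(bdev, bo, PAGE_SIZE, ttm_bo_type_device,
			  placement, 0, 0, true, NULL, acc_size, NULL);
	if (unlikely(ret != 0))
		return ret;	/* bo was already unreferenced by ttm_bo_init */

	*p_bo = bo;
	return 0;
}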