Example 1
0
/*
 * Pin a GEM object that is being exported via PRIME.
 *
 * Waits for all shared fences on the BO, pins it into GTT, and bumps
 * the prime-shared refcount on success.  Returns 0 or a negative errno.
 */
int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	long r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;

	/*
	 * Before this BO switches to exclusive-fence usage as a prime
	 * shared buffer, every shared fence on it must have signalled.
	 */
	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
						MAX_SCHEDULE_TIMEOUT);
	if (unlikely(r < 0)) {
		DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
		amdgpu_bo_unreserve(bo);
		return r;
	}

	/* Pin the buffer into GTT so it stays resident while exported. */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (likely(r == 0))
		bo->prime_shared_count++;

	amdgpu_bo_unreserve(bo);
	return r;
}
Example 2
0
/*
 * dma-buf attach callback: pin the BO into GTT so the importing device
 * can map it.  For attachments from a foreign driver, first wait for all
 * shared fences to signal, then account the BO as prime-shared.
 *
 * Returns 0 on success or a negative errno; on any failure after the
 * core attach succeeded, drm_gem_map_attach() is rolled back via
 * drm_gem_map_detach().
 */
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
				 struct device *target_dev,
				 struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	/* long, not int: reservation_object_wait_timeout_rcu() returns long */
	long r;

	r = drm_gem_map_attach(dma_buf, target_dev, attach);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto error_detach;


	/* Foreign-driver importer: the BO becomes prime-shared. */
	if (attach->dev->driver != adev->dev->driver) {
		/*
		 * Wait for all shared fences to complete before we switch to future
		 * use of exclusive fence on this prime shared bo.
		 */
		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
							true, false,
							MAX_SCHEDULE_TIMEOUT);
		if (unlikely(r < 0)) {
			DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
			goto error_unreserve;
		}
	}

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
	if (r)
		goto error_unreserve;

	/* Count only cross-driver attachments as prime-shared. */
	if (attach->dev->driver != adev->dev->driver)
		bo->prime_shared_count++;

	/* Success falls through: the BO must be unreserved either way. */
error_unreserve:
	amdgpu_bo_unreserve(bo);

error_detach:
	/* r == 0 here means success; only detach on an actual error. */
	if (r)
		drm_gem_map_detach(dma_buf, attach);
	return r;
}
Example 3
0
/*
 * Prepare a GEM object for CPU access: wait for GPU fences to signal
 * (or just poll them when MSM_PREP_NOSYNC is set).
 *
 * Returns 0 when the object is ready, -EBUSY for a failed non-blocking
 * poll, -ETIMEDOUT when the wait timed out, or another negative errno.
 */
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		/*
		 * Non-blocking poll: calling
		 * reservation_object_wait_timeout_rcu() with a zero timeout
		 * returns 0 for both "timed out" and cannot report -EBUSY,
		 * so test the fences directly instead.
		 */
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		/* long: the wait helper returns remaining jiffies or -errno */
		long ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}
Example 4
0
/*
 * Prepare a GEM object for CPU access: wait for GPU fences to signal
 * (or just poll them when MSM_PREP_NOSYNC is set).
 *
 * Returns 0 when the object is ready, -EBUSY for a failed non-blocking
 * poll, -ETIMEDOUT when the wait timed out, or another negative errno.
 */
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);

	if (op & MSM_PREP_NOSYNC) {
		/* Non-blocking: just poll whether the fences have signalled. */
		if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
			return -EBUSY;
	} else {
		/*
		 * reservation_object_wait_timeout_rcu() returns a long
		 * (remaining jiffies, 0 on timeout, or -errno); use a long
		 * so a large remaining-time value is not truncated.
		 */
		long ret;

		ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
				true, timeout_to_jiffies(timeout));
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	/* TODO cache maintenance */

	return 0;
}
Example 5
0
/*
 * DRM_IOCTL_TEGRA_GEM_CPU_PREP: wait until a BO is idle enough for CPU
 * access, bounded by the userspace-supplied timeout (microseconds).
 *
 * Returns 0 when idle, -ENOENT for a bad handle, -EBUSY for a failed
 * zero-timeout poll, -ETIMEDOUT on timeout, or another negative errno.
 */
int tegra_uapi_gem_cpu_prep(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_cpu_prep *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	unsigned long timeout;
	bool write;
	/*
	 * long, not int: reservation_object_wait_timeout_rcu() returns the
	 * remaining jiffies as a long, which may not fit in an int.
	 */
	long ret;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem) {
		DRM_ERROR("failed to find bo handle %u\n", args->handle);
		return -ENOENT;
	}

	bo = to_tegra_bo(gem);
	write = !!(args->flags & DRM_TEGRA_CPU_PREP_WRITE);
	timeout = usecs_to_jiffies(args->timeout);

	ret = reservation_object_wait_timeout_rcu(bo->resv, write,
						  true, timeout);

	/* The wait holds its own references; drop the lookup reference now. */
	drm_gem_object_put_unlocked(gem);

	if (ret == 0) {
		DRM_DEBUG_DRIVER("bo handle %u is busy\n", args->handle);
		return timeout == 0 ? -EBUSY : -ETIMEDOUT;
	}

	if (ret < 0) {
		/* %ld to match the (now long) return of the fence wait */
		DRM_ERROR("failed to await bo handle %u: %ld\n",
			  args->handle, ret);
		return ret;
	}

	DRM_DEBUG_DRIVER("bo handle %u is idling\n", args->handle);

	return 0;
}
Example 6
0
/*
 * Wait for any exclusive fence in fb's gem object's reservation object.
 *
 * Returns -ERESTARTSYS if interrupted, else 0 (a missing fb, gem object
 * or dma-buf reservation counts as "nothing to wait for").
 */
int mtk_fb_wait(struct drm_framebuffer *fb)
{
	struct reservation_object *resv;
	struct drm_gem_object *gem;
	long err;

	if (!fb)
		return 0;

	gem = mtk_fb_get_gem_obj(fb);
	if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
		return 0;

	resv = gem->dma_buf->resv;
	err = reservation_object_wait_timeout_rcu(resv, false, true,
						  MAX_SCHEDULE_TIMEOUT);
	/* Success yields MAX_SCHEDULE_TIMEOUT; -ERESTARTSYS if interrupted. */
	return WARN_ON(err < 0) ? err : 0;
}