static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, job, &fence);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

err:
	/* if gpu reset, hw fence will be replaced here */
	fence_put(job->fence);
	job->fence = fence;
	return fence;
}
Example 2
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
	if (r)
		DRM_ERROR("Error scheduling IBs (%d)\n", r);

	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);
	amdgpu_job_free_resources(job);
	return fence;
}
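Neither version of amdgpu_job_run() above is called directly; the scheduler invokes it through a table of backend callbacks that the driver registers, alongside the dependency callback shown in the later examples. Below is a minimal sketch of that wiring, assuming the older amd_sched_backend_ops interface from roughly the same kernel era as these snippets (later kernels renamed it to drm_sched_backend_ops, and the exact set of hooks varied between versions):

/* Sketch only: registering the run_job/dependency callbacks with the scheduler. */
static const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,	/* next fence the job must wait on, or NULL */
	.run_job    = amdgpu_job_run,		/* submit the IBs and return the HW fence */
	/* some versions of this struct also carry timedout_job/free_job hooks */
};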
Example 3
static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
{
	struct amdgpu_fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	mutex_lock(&job->job_lock);
	r = amdgpu_ib_schedule(job->adev,
			       job->num_ibs,
			       job->ibs,
			       job->base.owner);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

	fence = job->ibs[job->num_ibs - 1].fence;
	fence_get(&fence->base);

err:
	if (job->free_job)
		job->free_job(job);

	mutex_unlock(&job->job_lock);
	fence_put(&job->base.s_fence->base);
	kfree(job);
	return fence ? &fence->base : NULL;
}
Example 4
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	r = amdgpu_sync_wait(&job->sync);
	if (r) {
		DRM_ERROR("failed to sync wait (%d)\n", r);
		return NULL;
	}

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, job, &fence);
	if (r) {
		DRM_ERROR("Error scheduling IBs (%d)\n", r);
		goto err;
	}

err:
	job->fence = fence;
	amdgpu_job_free(job);
	return fence;
}
Example 5
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;

	struct fence *fence = amdgpu_sync_get_fence(&job->sync);

	if (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;
		int r;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished,
				      job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
Example 6
static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	return amdgpu_sync_get_fence(&job->ibs->sync);
}
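Taken together, the two callbacks encode a simple contract: the scheduler repeatedly asks dependency() for the next fence the job still has to wait on, and only once it returns NULL does it hand the job to run_job(), using the returned hardware fence to track completion. The rough illustration below shows that contract with a blocking wait, assuming the pre-dma_fence struct fence API; push_job_when_ready() is a made-up helper for illustration, not the scheduler's actual main loop, which waits on dependencies asynchronously via fence callbacks.

/* Illustration only: the dependency/run_job contract, not real scheduler code. */
static struct fence *push_job_when_ready(const struct amd_sched_backend_ops *ops,
					 struct amd_sched_job *sched_job)
{
	struct fence *dep;

	/* Drain unmet dependencies before submission. */
	while ((dep = ops->dependency(sched_job)) != NULL) {
		fence_wait(dep, false);		/* blocking wait, for illustration only */
		fence_put(dep);
	}

	/* All prerequisites have signaled: submit and return the HW fence. */
	return ops->run_job(sched_job);
}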