/* Submit IBs on a kernel ring: route them through the GPU scheduler when
 * the amdgpu_enable_scheduler module parameter is set, otherwise schedule
 * them on the ring directly. On success *f holds a reference to the fence
 * that signals completion. */
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
                                         int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **f)
{
        int r = 0;

        if (amdgpu_enable_scheduler) {
                struct amdgpu_job *job =
                        kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
                if (!job)
                        return -ENOMEM;
                job->base.sched = &ring->sched;
                job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
                job->adev = adev;
                job->ibs = ibs;
                job->num_ibs = num_ibs;
                job->base.owner = owner;
                mutex_init(&job->job_lock);
                job->free_job = free_job;
                mutex_lock(&job->job_lock);
                r = amd_sched_entity_push_job(&job->base);
                if (r) {
                        mutex_unlock(&job->job_lock);
                        kfree(job);
                        return r;
                }
                /* Hand the caller a reference to the scheduler fence. */
                *f = fence_get(&job->base.s_fence->base);
                mutex_unlock(&job->job_lock);
        } else {
                /* Scheduler disabled: submit directly to the ring. */
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
                        return r;
                *f = fence_get(&ibs[num_ibs - 1].fence->base);
        }
        return 0;
}
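/*
 * A minimal, hypothetical caller sketch (not from the source) showing the
 * expected pattern around amdgpu_sched_ib_submit_kernel_helper(). The
 * example_* names and the single-IB setup are assumptions for illustration;
 * real callers pass their own free_job callback that releases the IB array.
 */
static int example_free_job(struct amdgpu_job *job)
{
        /* Illustrative cleanup: real callbacks also release each IB. */
        kfree(job->ibs);
        return 0;
}

static int example_submit_one_ib(struct amdgpu_device *adev,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_ib *ib;
        struct fence *f = NULL;
        int r;

        ib = kzalloc(sizeof(*ib), GFP_KERNEL);
        if (!ib)
                return -ENOMEM;

        /* ... amdgpu_ib_get() and command emission would go here ... */

        r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
                                                 example_free_job,
                                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                                 &f);
        if (r) {
                kfree(ib);
                return r;
        }

        /* Wait for completion, then drop our fence reference. */
        fence_wait(f, false);
        fence_put(f);
        return 0;
}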
/* Push a caller-allocated job to a scheduler entity. On success *f holds a
 * reference to the job's "finished" fence, which signals once the job has
 * completed on the hardware. */
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f)
{
        int r;

        job->ring = ring;

        if (!f)
                return -EINVAL;

        r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        job->ctx = entity->fence_context;
        *f = fence_get(&job->base.s_fence->finished);

        /* Resources the caller no longer needs are released here; the
         * scheduler owns the job from the moment it is pushed. */
        amdgpu_job_free_resources(job);
        amd_sched_entity_push_job(&job->base);

        return 0;
}
/* Variant of amdgpu_job_submit() in which amd_sched_job_init() registers
 * explicit timeout and free callbacks and returns the scheduler fence
 * through an out parameter. */
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct amd_sched_entity *entity, void *owner,
                      struct fence **f)
{
        struct fence *fence;
        int r;

        job->ring = ring;

        if (!f)
                return -EINVAL;

        r = amd_sched_job_init(&job->base, &ring->sched, entity,
                               amdgpu_job_timeout_func,
                               amdgpu_job_free_func,
                               owner, &fence);
        if (r)
                return r;

        job->owner = owner;
        job->ctx = entity->fence_context;
        *f = fence_get(fence);
        amd_sched_entity_push_job(&job->base);

        return 0;
}
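/*
 * A minimal, hypothetical caller sketch (not from the source) for the
 * amdgpu_job_submit() variants above. example_submit_job and the 64-byte
 * IB size are assumptions for illustration; the entity is assumed to be
 * already initialized via amd_sched_entity_init().
 */
static int example_submit_job(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring,
                              struct amd_sched_entity *entity)
{
        struct amdgpu_job *job;
        struct fence *fence = NULL;
        int r;

        /* Allocate a job with a single, small IB. */
        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                return r;

        /* ... emit commands into job->ibs[0] here ... */

        r = amdgpu_job_submit(job, ring, entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r) {
                amdgpu_job_free(job);
                return r;
        }

        /* The scheduler now owns the job; we only hold the fence. */
        fence_put(fence);
        return 0;
}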