/* Return the next fence the job must wait on before it can be run. */
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;

	struct fence *fence = amdgpu_sync_get_fence(&job->sync);

	if (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;
		int r;

		/* All explicit dependencies are resolved but the job still
		 * needs a VM ID; grabbing one may queue additional fences
		 * on the sync object, so fetch again afterwards.
		 */
		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished, job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}
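/*
 * For context, a minimal sketch (not verbatim from the driver) of how the
 * callback above is wired into the GPU scheduler: amdgpu hands the
 * scheduler a struct amd_sched_backend_ops, and the scheduler keeps
 * calling .dependency, waiting on each returned fence, until it returns
 * NULL; only then is .run_job invoked. The exact set of ops varied
 * between kernel versions, so take this as an approximation.
 */
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job);

const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
};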
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @adev: amdgpu_device pointer
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @owner: owner for creating the fences
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ibs, void *owner)
{
	struct amdgpu_ib *ib = &ibs[0];
	struct amdgpu_ring *ring;
	struct amdgpu_ctx *ctx, *old_ctx;
	struct amdgpu_vm *vm;
	unsigned i;
	int r = 0;

	if (num_ibs == 0)
		return -EINVAL;

	ring = ibs->ring;
	ctx = ibs->ctx;
	vm = ibs->vm;

	if (!ring->ready) {
		dev_err(adev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	r = amdgpu_sync_wait(&ibs->sync);
	if (r) {
		dev_err(adev->dev, "IB sync failed (%d).\n", r);
		return r;
	}

	r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
	if (r) {
		dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	if (vm) {
		/* grab a vm id if necessary */
		r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
		if (r) {
			amdgpu_ring_unlock_undo(ring);
			return r;
		}
	}

	r = amdgpu_sync_rings(&ibs->sync, ring);
	if (r) {
		amdgpu_ring_unlock_undo(ring);
		dev_err(adev->dev, "failed to sync rings (%d)\n", r);
		return r;
	}

	if (vm) {
		/* do context switch */
		amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);

		if (ring->funcs->emit_gds_switch)
			amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
						    ib->gds_base, ib->gds_size,
						    ib->gws_base, ib->gws_size,
						    ib->oa_base, ib->oa_size);

		if (ring->funcs->emit_hdp_flush)
			amdgpu_ring_emit_hdp_flush(ring);
	}

	old_ctx = ring->current_ctx;
	for (i = 0; i < num_ibs; ++i) {
		ib = &ibs[i];

		if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
			ring->current_ctx = old_ctx;
			amdgpu_ring_unlock_undo(ring);
			return -EINVAL;
		}
		amdgpu_ring_emit_ib(ring, ib);
		ring->current_ctx = ctx;
	}

	r = amdgpu_fence_emit(ring, owner, &ib->fence);
	if (r) {
		dev_err(adev->dev, "failed to emit fence (%d)\n", r);
		ring->current_ctx = old_ctx;
		amdgpu_ring_unlock_undo(ring);
		return r;
	}

	if (!amdgpu_enable_scheduler && ib->ctx)
		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
						    &ib->fence->base);

	/* wrap the last IB with fence */
	if (ib->user) {
		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
		addr += ib->user->offset;
		amdgpu_ring_emit_fence(ring, addr, ib->sequence,
				       AMDGPU_FENCE_FLAG_64BIT);
	}

	if (ib->vm)
		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);

	amdgpu_ring_unlock_commit(ring);
	return 0;
}
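/*
 * Illustrative only: a hypothetical helper (not part of the driver)
 * showing the two-IB submission described in the kernel-doc above. It
 * assumes both IBs were prepared on the same ring, ctx and vm, which
 * the validation loop in amdgpu_ib_schedule() enforces. The CONST_IB
 * sits at index 0 so the CE primes the caches before the DE IB runs.
 */
static int example_submit_ce_de(struct amdgpu_device *adev,
				struct amdgpu_ib ibs[2], void *owner)
{
	/* ibs[0] is the CE IB (CONST_IB), ibs[1] the DE IB; they are
	 * emitted on the ring in array order.
	 */
	return amdgpu_ib_schedule(adev, 2, ibs, owner);
}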