uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r;
        uint32_t val;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        /* emit the register read and a fence on the KIQ ring */
        mutex_lock(&adev->virt.lock_kiq);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&adev->virt.lock_kiq);

        /* wait for the CP to execute the read before sampling the result */
        r = dma_fence_wait(f, false);
        if (r)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);

        /* the CP deposits the register value into this writeback slot */
        val = adev->wb.wb[adev->virt.reg_val_offs];

        return val;
}
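/*
 * Usage sketch (not taken from the driver): under SR-IOV a guest cannot
 * always touch MMIO directly, so a register-read wrapper can fall back to
 * the KIQ helper above. amdgpu_sriov_runtime() and the rmmio read mirror
 * the real access path, but the wrapper name and the AMDGPU_REGS_NO_KIQ
 * flag handling here are illustrative assumptions, not the driver's exact
 * code.
 */
static uint32_t example_rreg(struct amdgpu_device *adev, uint32_t reg,
                             uint32_t acc_flags)
{
        /* SR-IOV guests at runtime must tunnel the read through the KIQ */
        if (amdgpu_sriov_runtime(adev) && !(acc_flags & AMDGPU_REGS_NO_KIQ))
                return amdgpu_virt_kiq_rreg(adev, reg);

        /* bare-metal (or exclusive access) path: plain MMIO read */
        return readl(((void __iomem *)adev->rmmio) + (reg * 4));
}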
static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
                                             struct amdgpu_ring *ring,
                                             struct fence **fence)
{
        uint32_t handle = ring->idx ^ 0xdeafbeef;
        int r;

        if (ring == &adev->uvd.ring) {
                r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = amdgpu_uvd_get_destroy_msg(ring, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }
        } else if (ring == &adev->vce.ring[0] ||
                   ring == &adev->vce.ring[1]) {
                r = amdgpu_vce_get_create_msg(ring, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }
        } else {
                struct amdgpu_fence *a_fence = NULL;

                r = amdgpu_ring_lock(ring, 64);
                if (r) {
                        DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
                        return r;
                }
                amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, &a_fence);
                amdgpu_ring_unlock_commit(ring);
                *fence = &a_fence->base;
        }
        return 0;
}
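/*
 * Caller sketch (not from the driver), loosely modelled on the old ring
 * tests that live in the same file: emit a dummy fence on a ring and block
 * until the GPU signals it. Error reporting is trimmed for brevity.
 */
static int example_test_fence(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring)
{
        struct fence *fence = NULL;
        long wait_ret;
        int r;

        r = amdgpu_test_create_and_emit_fence(adev, ring, &fence);
        if (r)
                return r;

        /* fence_wait() returns 0 once the fence signals, or a negative error */
        wait_ret = fence_wait(fence, false);
        fence_put(fence);

        return wait_ret < 0 ? (int)wait_ret : 0;
}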
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r;
        struct dma_fence *f;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        /* bracket the register write with an HDP flush/invalidate and fence it */
        mutex_lock(&adev->virt.lock);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_hdp_flush(ring);
        amdgpu_ring_emit_wreg(ring, reg, v);
        amdgpu_ring_emit_hdp_invalidate(ring);
        amdgpu_fence_emit(ring, &f);
        amdgpu_ring_commit(ring);
        mutex_unlock(&adev->virt.lock);

        /* wait for the CP to apply the write before returning */
        r = dma_fence_wait(f, false);
        if (r)
                DRM_ERROR("wait for kiq fence error: %ld.\n", r);
        dma_fence_put(f);
}
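/*
 * Write-side counterpart of the earlier usage sketch; again the wrapper
 * name and the flag handling are assumptions, only amdgpu_sriov_runtime()
 * and the KIQ/MMIO calls come from the driver.
 */
static void example_wreg(struct amdgpu_device *adev, uint32_t reg,
                         uint32_t v, uint32_t acc_flags)
{
        if (amdgpu_sriov_runtime(adev) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
                /* tunnel the write through the KIQ ring */
                amdgpu_virt_kiq_wreg(adev, reg, v);
                return;
        }

        /* direct MMIO write on bare metal */
        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}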
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @adev: amdgpu_device pointer
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @owner: owner for creating the fences
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
                       struct amdgpu_ib *ibs, void *owner)
{
        struct amdgpu_ib *ib = &ibs[0];
        struct amdgpu_ring *ring;
        struct amdgpu_ctx *ctx, *old_ctx;
        struct amdgpu_vm *vm;
        unsigned i;
        int r = 0;

        if (num_ibs == 0)
                return -EINVAL;

        ring = ibs->ring;
        ctx = ibs->ctx;
        vm = ibs->vm;

        if (!ring->ready) {
                dev_err(adev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }
        r = amdgpu_sync_wait(&ibs->sync);
        if (r) {
                dev_err(adev->dev, "IB sync failed (%d).\n", r);
                return r;
        }
        r = amdgpu_ring_lock(ring, (256 + AMDGPU_NUM_SYNCS * 8) * num_ibs);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        if (vm) {
                /* grab a vm id if necessary */
                r = amdgpu_vm_grab_id(ibs->vm, ibs->ring, &ibs->sync);
                if (r) {
                        amdgpu_ring_unlock_undo(ring);
                        return r;
                }
        }

        r = amdgpu_sync_rings(&ibs->sync, ring);
        if (r) {
                amdgpu_ring_unlock_undo(ring);
                dev_err(adev->dev, "failed to sync rings (%d)\n", r);
                return r;
        }

        if (vm) {
                /* do context switch */
                amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);

                if (ring->funcs->emit_gds_switch)
                        amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
                                                    ib->gds_base, ib->gds_size,
                                                    ib->gws_base, ib->gws_size,
                                                    ib->oa_base, ib->oa_size);

                if (ring->funcs->emit_hdp_flush)
                        amdgpu_ring_emit_hdp_flush(ring);
        }

        old_ctx = ring->current_ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                if (ib->ring != ring || ib->ctx != ctx || ib->vm != vm) {
                        ring->current_ctx = old_ctx;
                        amdgpu_ring_unlock_undo(ring);
                        return -EINVAL;
                }
                amdgpu_ring_emit_ib(ring, ib);
                ring->current_ctx = ctx;
        }

        r = amdgpu_fence_emit(ring, owner, &ib->fence);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                ring->current_ctx = old_ctx;
                amdgpu_ring_unlock_undo(ring);
                return r;
        }

        if (!amdgpu_enable_scheduler && ib->ctx)
                ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
                                                    &ib->fence->base);

        /* wrap the last IB with fence */
        if (ib->user) {
                uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);

                addr += ib->user->offset;
                amdgpu_ring_emit_fence(ring, addr, ib->sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }

        if (ib->vm)
                amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);

        amdgpu_ring_unlock_commit(ring);
        return 0;
}
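/*
 * Rough caller sketch for this early interface, modelled on the IB ring
 * tests of the same era: grab an IB, fill it with packets (omitted),
 * schedule it and wait for its fence. The size, the NULL vm and the helper
 * signatures (amdgpu_ib_get/amdgpu_ib_free taking the ring and no fence)
 * are assumptions tied to this vintage of the driver.
 */
static int example_submit_one_ib(struct amdgpu_device *adev,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_ib ib;
        int r;

        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(ring, NULL, 256, &ib);
        if (r)
                return r;

        /* ... write ring packets into ib.ptr[] and set ib.length_dw ... */

        r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
        if (!r)
                /* block until the emitted fence signals */
                r = (int)fence_wait(&ib.fence->base, false);

        amdgpu_ib_free(adev, &ib);
        return r;
}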
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @job: job the IBs belong to, or NULL for a direct (e.g. ring test) submission
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ibs, struct amdgpu_job *job,
                       struct dma_fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
        uint32_t status = 0, alloc_size;
        unsigned fence_flags = 0;
        unsigned i;
        int r = 0;
        bool need_pipe_sync = false;

        if (num_ibs == 0)
                return -EINVAL;

        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
                fence_ctx = job->base.s_fence->scheduled.context;
        } else {
                vm = NULL;
                fence_ctx = 0;
        }

        if (!ring->sched.ready) {
                dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
                return -EINVAL;
        }

        if (vm && !job->vmid) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        alloc_size = ring->funcs->emit_frame_size + num_ibs *
                ring->funcs->emit_ib_size;

        r = amdgpu_ring_alloc(ring, alloc_size);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
             (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;

                if (tmp)
                        trace_amdgpu_ib_pipe_sync(job, tmp);

                dma_fence_put(tmp);
        }

        if (ring->funcs->insert_start)
                ring->funcs->insert_start(ring);

        if (job) {
                r = amdgpu_vm_flush(ring, job, need_pipe_sync);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        if (job && ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
        {
                if (ring->funcs->emit_hdp_flush)
                        amdgpu_ring_emit_hdp_flush(ring);
                else
                        amdgpu_asic_flush_hdp(adev, ring);
        }

        if (need_ctx_switch)
                status |= AMDGPU_HAVE_CTX_SWITCH;

        skip_preamble = ring->current_ctx == fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
                amdgpu_ring_emit_cntxcntl(ring, status);
        }

        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
                    skip_preamble &&
                    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
                    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;

                amdgpu_ring_emit_ib(ring, job, ib, status);
                status &= ~AMDGPU_HAVE_CTX_SWITCH;
        }

        if (ring->funcs->emit_tmz)
                amdgpu_ring_emit_tmz(ring, false);

#ifdef CONFIG_X86_64
        if (!(adev->flags & AMD_IS_APU))
#endif
                amdgpu_asic_invalidate_hdp(adev, ring);

        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;

        /* wrap the last IB with fence */
        if (job && job->uf_addr) {
                amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }

        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
                        amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
                amdgpu_ring_undo(ring);
                return r;
        }

        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = fence_ctx;
        if (vm && ring->funcs->emit_switch_buffer)
                amdgpu_ring_emit_switch_buffer(ring);
        amdgpu_ring_commit(ring);
        return 0;
}
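/*
 * Sketch of a direct submission with this newer interface, in the style of
 * the IB ring tests (note the "ring tests don't use a job" branch above):
 * job is NULL, so no VM flush or conditional execution is emitted. The IB
 * size, the timeout and the exact amdgpu_ib_get()/amdgpu_ib_free()
 * signatures are assumptions for this era of the driver.
 */
static int example_direct_submit(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_ib ib;
        long timeout;
        int r;

        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 256, &ib);
        if (r)
                return r;

        /* ... fill ib.ptr[] and ib.length_dw with ring packets ... */

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err;

        /* wait for the fence emitted on our behalf above */
        timeout = dma_fence_wait_timeout(f, false, msecs_to_jiffies(100));
        if (timeout == 0)
                r = -ETIMEDOUT;
        else if (timeout < 0)
                r = (int)timeout;
err:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
        return r;
}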
/**
 * amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @ring: ring the IBs are scheduled on
 * @num_ibs: number of IBs to schedule
 * @ibs: IB objects to schedule
 * @last_vm_update: fence of the last VM update
 * @job: job the IBs belong to, or NULL for a direct (e.g. ring test) submission
 * @f: fence created during this submission
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
 */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                       struct amdgpu_ib *ibs, struct fence *last_vm_update,
                       struct amdgpu_job *job, struct fence **f)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        struct fence *hwf;
        uint64_t ctx;
        unsigned i;
        int r = 0;

        if (num_ibs == 0)
                return -EINVAL;

        /* ring tests don't use a job */
        if (job) {
                vm = job->vm;
                ctx = job->ctx;
        } else {
                vm = NULL;
                ctx = 0;
        }

        if (!ring->ready) {
                dev_err(adev->dev, "couldn't schedule ib\n");
                return -EINVAL;
        }

        if (vm && !job->vm_id) {
                dev_err(adev->dev, "VM IB without ID\n");
                return -EINVAL;
        }

        r = amdgpu_ring_alloc(ring, 256 * num_ibs);
        if (r) {
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }

        if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
                patch_offset = amdgpu_ring_init_cond_exec(ring);

        if (vm) {
                r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
                                    job->gds_base, job->gds_size,
                                    job->gws_base, job->gws_size,
                                    job->oa_base, job->oa_size);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
                }
        }

        if (ring->funcs->emit_hdp_flush)
                amdgpu_ring_emit_hdp_flush(ring);

        /* always set cond_exec_polling to CONTINUE */
        *ring->cond_exe_cpu_addr = 1;

        skip_preamble = ring->current_ctx == ctx;
        need_ctx_switch = ring->current_ctx != ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];

                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                        continue;

                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
                                    need_ctx_switch);
                need_ctx_switch = false;
        }

        if (ring->funcs->emit_hdp_invalidate)
                amdgpu_ring_emit_hdp_invalidate(ring);

        r = amdgpu_fence_emit(ring, &hwf);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
                        amdgpu_vm_reset_id(adev, job->vm_id);
                amdgpu_ring_undo(ring);
                return r;
        }

        /* wrap the last IB with fence */
        if (job && job->uf_bo) {
                uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);

                addr += job->uf_offset;
                amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
                                       AMDGPU_FENCE_FLAG_64BIT);
        }

        if (f)
                *f = fence_get(hwf);

        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);

        ring->current_ctx = ctx;
        amdgpu_ring_commit(ring);
        return 0;
}
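/*
 * Hypothetical run_job-style caller for this variant. The job layout used
 * here (job->ring, job->num_ibs, job->ibs, job->sync.last_vm_update) is an
 * assumption about this era of the driver, not a verbatim copy of
 * amdgpu_job_run() from any particular kernel.
 */
static struct fence *example_run_job(struct amdgpu_job *job)
{
        struct fence *fence = NULL;
        int r;

        r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
                               job->sync.last_vm_update, job, &fence);
        if (r)
                DRM_ERROR("Error scheduling IBs (%d)\n", r);

        /* the caller owns the reference taken by fence_get() above */
        return fence;
}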