/** * amdgpu_ib_free - free an IB (Indirect Buffer) * * @adev: amdgpu_device pointer * @ib: IB object to free * * Free an IB (all asics). */ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib) { amdgpu_sync_free(adev, &ib->sync, &ib->fence->base); amdgpu_sa_bo_free(adev, &ib->sa_bo, &ib->fence->base); if (ib->fence) fence_put(&ib->fence->base); }
/**
 * amdgpu_semaphore_free - free a semaphore object
 *
 * @adev: amdgpu_device pointer
 * @semaphore: pointer to the semaphore pointer; cleared to NULL on return
 * @fence: fence the semaphore's SA bo must wait on before its memory
 *         can be reused
 *
 * Complains if the semaphore still has outstanding waiters (a sign of
 * an imminent hardware lockup), releases the backing suballocation
 * against @fence and frees the semaphore object.
 */
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct amdgpu_fence *fence)
{
	struct amdgpu_semaphore *sem;

	if (!semaphore)
		return;

	sem = *semaphore;
	if (!sem)
		return;

	if (sem->waiters > 0)
		dev_err(adev->dev, "semaphore %p has more waiters than signalers,"
			" hardware lockup imminent!\n", sem);

	amdgpu_sa_bo_free(adev, &sem->sa_bo, fence);
	kfree(sem);
	*semaphore = NULL;
}
/**
 * amdgpu_job_free - release the resources owned by a job
 *
 * @job: job to clean up
 *
 * Frees each IB's suballocation against the fence that protects it
 * (the scheduler fence when one exists, the hardware fence otherwise),
 * drops the fence and user-fence BO references, frees the sync object
 * and, unless the scheduler owns the job structure, the job itself.
 */
void amdgpu_job_free(struct amdgpu_job *job)
{
	struct fence *f = job->fence;
	unsigned idx;

	/* prefer the scheduler fence when one is attached */
	if (job->base.s_fence)
		f = &job->base.s_fence->base;

	for (idx = 0; idx < job->num_ibs; idx++)
		amdgpu_sa_bo_free(job->adev, &job->ibs[idx].sa_bo, f);

	fence_put(job->fence);
	amdgpu_bo_unref(&job->uf_bo);
	amdgpu_sync_free(&job->sync);

	/* scheduler-managed jobs are freed by the scheduler, not here */
	if (!job->base.use_sched)
		kfree(job);
}
/**
 * amdgpu_ib_free - free an IB (Indirect Buffer)
 *
 * @adev: amdgpu_device pointer
 * @ib: IB object to free
 * @f: fence the IB's suballocation must wait on before the memory
 *     can be reused
 *
 * Free an IB (all asics).
 */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f)
{
	amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}