/* Close an iova. Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	/* Refuse to tear down a vma that is still pinned or mapped. */
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	/* Release the drm_mm range under the address-space lock. */
	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the reference the vma held on its address space. */
	msm_gem_address_space_put(aspace);
}
static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); struct msm_gem_address_space *aspace = kms->aspace; int i; for (i = 0; i < mdp5_kms->num_hwmixers; i++) mdp5_mixer_destroy(mdp5_kms->hwmixers[i]); for (i = 0; i < mdp5_kms->num_hwpipes; i++) mdp5_pipe_destroy(mdp5_kms->hwpipes[i]); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); msm_gem_address_space_put(aspace); } }
/*
 * Unmap a vma from the MMU and release its drm_mm node.  A vma that
 * was never set up (no aspace, or no iova assigned) is a no-op.
 */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt)
{
	if (!aspace || !vma->iova)
		return;

	if (aspace->mmu) {
		/* node.size is in pages; the MMU unmap wants bytes. */
		unsigned int len = vma->node.size << PAGE_SHIFT;

		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, len);
	}

	spin_lock(&aspace->lock);
	drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	/* Drop the reference the vma held on its address space. */
	msm_gem_address_space_put(aspace);
}
static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct device *dev = mdp4_kms->dev->dev; struct msm_gem_address_space *aspace = kms->aspace; if (mdp4_kms->blank_cursor_iova) msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace); drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo); if (aspace) { aspace->mmu->funcs->detach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); msm_gem_address_space_put(aspace); } if (mdp4_kms->rpm_enabled) pm_runtime_disable(dev); kfree(mdp4_kms); }
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { struct msm_gpu *gpu = &adreno_gpu->base; if (adreno_gpu->memptrs_bo) { if (adreno_gpu->memptrs) msm_gem_put_vaddr(adreno_gpu->memptrs_bo); if (adreno_gpu->memptrs_iova) msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id); drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); } release_firmware(adreno_gpu->pm4); release_firmware(adreno_gpu->pfp); msm_gpu_cleanup(gpu); if (gpu->aspace) { gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); msm_gem_address_space_put(gpu->aspace); } }