/**
 * amdgpu_map_static_csa - map the static CSA buffer into a VM
 *
 * @adev: amdgpu device pointer
 * @vm: vm to map the CSA into
 *
 * Reserves the CSA and page directory BOs, creates a bo_va for the CSA,
 * allocates the needed page tables and maps the CSA at AMDGPU_CSA_VADDR.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;
	struct amdgpu_bo_va *bo_va;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	/* reserve the CSA BO together with the VM's page directory */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	vm->csa_bo_va = bo_va;
	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}