/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_vm *vm;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r) {
				kfree(fpriv);
				return r;
			}

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				return r;
			}

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			if (r) {
				radeon_vm_fini(rdev, vm);
				kfree(fpriv);
				return r;
			}
		}
		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;
}
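/* Reference sketch (not part of this file): the per-file private data that
 * the open/postclose callbacks above and below allocate and free is, in the
 * mainline radeon.h of this era, just a wrapper around the per-file VM.
 * This minimal layout is an assumption for illustration only; later kernels
 * may carry additional fields.
 */
struct radeon_fpriv {
	struct radeon_vm	vm;
};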
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_bo_va *bo_va;
		int r;

		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
		if (!r) {
			bo_va = radeon_vm_bo_find(&fpriv->vm,
						  rdev->ring_tmp_bo.bo);
			if (bo_va)
				radeon_vm_bo_rmv(rdev, bo_va);
			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		}

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics).  And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);

	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		struct radeon_bo_va *bo_va;
		int r;

		fpriv = kmalloc(sizeof(*fpriv), DRM_MEM_DRIVER,
				M_ZERO | M_WAITOK);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		radeon_vm_init(rdev, &fpriv->vm);

		/* map the ib pool buffer read only into
		 * virtual address space */
		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
					 rdev->ring_tmp_bo.bo);
		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
					  RADEON_VM_PAGE_READABLE |
					  RADEON_VM_PAGE_SNOOPED);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			drm_free(fpriv, DRM_MEM_DRIVER);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}
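/* Hedged sketch of how these callbacks are wired into the DRM core: the
 * radeon KMS driver registers them through its struct drm_driver (the .open
 * and .postclose hooks exist in the DRM core; the surrounding initializer
 * here is abbreviated and assumed, not copied from radeon_drv.c).
 */
static struct drm_driver kms_driver = {
	.open = radeon_driver_open_kms,
	.postclose = radeon_driver_postclose_kms,
	/* ... other driver features and callbacks elided ... */
};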