Example #1
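/**
 * amdgpu_sched_process_priority_override - apply a priority override per fd
 *
 * @adev: amdgpu device pointer
 * @fd: file descriptor identifying the target DRM client
 * @priority: scheduler priority to apply
 *
 * Resolves the file descriptor to its amdgpu_fpriv and applies the given
 * scheduler priority override to every context that client currently owns.
 */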
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
						  int fd,
						  enum drm_sched_priority priority)
{
	struct file *filp = fget(fd);
	struct amdgpu_fpriv *fpriv;
	struct amdgpu_ctx *ctx;
	uint32_t id;
	int r;

	if (!filp)
		return -EINVAL;

	r = amdgpu_file_to_fpriv(filp, &fpriv);
	if (r) {
		fput(filp);
		return r;
	}

	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
		amdgpu_ctx_priority_override(ctx, priority);

	fput(filp);

	return 0;
}
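Example #1 also illustrates the fd-to-file reference discipline: fget() takes a reference on the struct file, and both the error path and the success path must drop it with fput(). Below is a minimal sketch of that idiom; resolve_private() is a hypothetical stand-in for amdgpu_file_to_fpriv() and not a real kernel API.

#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>

/* Hypothetical helper; stands in for amdgpu_file_to_fpriv(). */
static int resolve_private(struct file *filp, void **priv);

static int act_on_fd(unsigned int fd)
{
	struct file *filp = fget(fd);	/* takes a reference, or returns NULL */
	void *priv;
	int r;

	if (!filp)
		return -EINVAL;

	r = resolve_private(filp, &priv);
	if (r) {
		fput(filp);		/* error path drops the reference */
		return r;
	}

	/* ... use priv while the reference keeps the file alive ... */

	fput(filp);			/* success path drops it as well */
	return 0;
}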
Example #2
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
Example #3
/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	rcu_read_lock();
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed
		 */
		__detach_context(ctx);
	rcu_read_unlock();
}
Example #4
/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}
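Examples #2 and #4-#6 share the same teardown shape: walk every object still registered in an IDR with idr_for_each_entry(), release each entry, then call idr_destroy() to free the IDR's own bookkeeping. The following is a minimal, self-contained sketch of that pattern; the foo_* names are hypothetical and do not come from any of the drivers above.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_ctx {
	int handle;
};

struct foo_mgr {
	struct mutex lock;	/* serializes handle allocation and lookup */
	struct idr handles;	/* maps a handle to a struct foo_ctx * */
};

static void foo_mgr_fini(struct foo_mgr *mgr)
{
	struct foo_ctx *ctx;
	int id;

	/* Free every context still tracked by the IDR. */
	idr_for_each_entry(&mgr->handles, ctx, id)
		kfree(ctx);

	/*
	 * idr_destroy() only releases the IDR's internal memory, not the
	 * entries themselves, which is why the walk above must run first.
	 */
	idr_destroy(&mgr->handles);
	mutex_destroy(&mgr->lock);
}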
Example #5
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
Example #6
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;
}