Example #1
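The DRM post-close callback from the Linux amdgpu driver. When a file descriptor is closed, it frees the per-file UVD/VCE handles, unmaps the SR-IOV CSA, tears down the per-file GPU VM together with its PASID, and releases every BO list the file still owns.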
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

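	/* Keep the device awake for the teardown below. */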
	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

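	/*
	 * Keep a page-directory reference around so the PASID can be
	 * freed against its reservation object after the VM is gone.
	 */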
	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
Example #2
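A later revision of amdgpu_bo_list_set(), which turns an array of user-supplied GEM handles into a BO list: normal BOs fill the array from the front while userptr BOs are sorted to the back, and BOs whose preferred domain is GDS, GWS or OA are remembered as the special objects of the list.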
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

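		/*
		 * Userptr BOs are grouped at the tail of the array,
		 * with first_userptr marking where they start.
		 */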
		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

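	/* Success: drop the references held by the previous list. */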
	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Entries were filled in from both ends; unwind both ranges. */
	while (last_entry--)
		amdgpu_bo_unref(&array[last_entry].robj);
	while (first_userptr < num_entries)
		amdgpu_bo_unref(&array[first_userptr++].robj);
	kvfree(array);
	return r;
}
Example #3
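An earlier revision of the same function for comparison: it tracks only a has_userptr flag instead of partitioning the array, and computes each entry's preferred and allowed domains inline.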
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = info[i].bo_priority;
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
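		/* Userptr BOs are backed by system pages, so force GTT. */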
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	/* Drop the references taken for already-processed entries. */
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return -ENOENT;
}