示例#1
0
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
retry:
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_sleep_if_busy(m, "ttm_unm"))
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}
示例#2
0
/*
 * cdev pager destructor for a pscnv GEM buffer object: frees the pages
 * of the device pager object backing the mapping, drops the pager
 * reference, and releases the reference the mapping held on the buffer
 * (through its channel if it has one, otherwise through the GEM object).
 */
static void
pscnv_gem_pager_dtor(void *handle)
{
	struct drm_gem_object *gem_obj = handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	vm_object_t devobj;
	vm_size_t page_count, i;
	vm_page_t m;

	DRM_LOCK(dev);
	devobj = cdev_pager_lookup(handle);
	if (devobj == NULL) {
		/* No pager object for this handle: nothing to tear down. */
		DRM_UNLOCK(dev);
		NV_ERROR(dev, "Could not find handle %p bo %p\n", handle, bo);
		return;
	}

	page_count = OFF_TO_IDX(bo->size);
	VM_OBJECT_LOCK(devobj);
	/*
	 * NOTE(review): unlike the ttm/privcmd pager dtors, pages are freed
	 * here without waiting for busy pages — confirm no mapping can still
	 * hold a page busy at this point.
	 */
	for (i = 0; i < page_count; i++) {
		m = vm_page_lookup(devobj, i);
		if (m == NULL)
			continue;
		if (pscnv_mem_debug > 0)
			/*
			 * Fixed format string: closing ')' was missing, and
			 * the byte offset is cast so it matches %llx.
			 */
			NV_WARN(dev, "Freeing %010llx + %08llx (%p)\n",
			    bo->start, (unsigned long long)i * PAGE_SIZE, m);
		cdev_pager_free_page(devobj, m);
	}
	VM_OBJECT_UNLOCK(devobj);
	vm_object_deallocate(devobj);

	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Freed %010llx (%p)\n", bo->start, bo);

	/* Drop the reference the mapping held on the buffer. */
	if (bo->chan)
		pscnv_chan_unref(bo->chan);
	else
		drm_gem_object_unreference_unlocked(gem_obj);
	DRM_UNLOCK(dev);
}
示例#3
0
/*
 * Release the device pager object that backs "bo"'s mmap view.  Every
 * resident page of the object is busied and handed back to the pager,
 * after which the lookup's reference on the object is dropped.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t obj;
	vm_page_t page;
	int pindex;

	obj = cdev_pager_lookup(bo);
	if (obj == NULL)
		return;

	VM_OBJECT_WLOCK(obj);
	for (pindex = 0; pindex < bo->num_pages; pindex++) {
		/* Sleeps until the page, if present, can be busied. */
		page = vm_page_lookup_busy_wait(obj, pindex, TRUE, "ttm_unm");
		if (page != NULL)
			cdev_pager_free_page(obj, page);
	}
	VM_OBJECT_WUNLOCK(obj);

	vm_object_deallocate(obj);
}
示例#4
0
/*
 * Destructor for a privcmd mapping: frees the pages of the backing VM
 * object, asks Xen to remove the corresponding physmap (p2m) entries,
 * then releases the pseudo-physical memory resource and the map itself.
 */
static void
privcmd_pg_dtor(void *handle)
{
	struct xen_remove_from_physmap rm = { .domid = DOMID_SELF };
	struct privcmd_map *map = handle;
	int error;
	vm_size_t i;
	vm_page_t m;

	/*
	 * Remove the mappings from the used pages. This will remove the
	 * underlying p2m bindings in Xen second stage translation.
	 */
	if (map->mapped == true) {
		VM_OBJECT_WLOCK(map->mem);
retry:
		for (i = 0; i < map->size; i++) {
			m = vm_page_lookup(map->mem, i);
			if (m == NULL)
				continue;
			/*
			 * If the page is busy, sleep until it is released
			 * and rescan from index 0 — the object's page set
			 * may have changed while we slept.
			 */
			if (vm_page_sleep_if_busy(m, "pcmdum"))
				goto retry;
			cdev_pager_free_page(map->mem, m);
		}
		VM_OBJECT_WUNLOCK(map->mem);

		/* Drop each guest frame of the mapping from the physmap. */
		for (i = 0; i < map->size; i++) {
			rm.gpfn = atop(map->phys_base_addr) + i;
			HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &rm);
		}
		free(map->err, M_PRIVCMD);
	}

	/* Return the pseudo-physical address range backing the mapping. */
	error = xenmem_free(privcmd_dev, map->pseudo_phys_res_id,
	    map->pseudo_phys_res);
	KASSERT(error == 0, ("Unable to release memory resource: %d", error));

	free(map, M_PRIVCMD);
}