Example #1
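/*
 * The simplest usage pattern: take the io lock for the BO's current
 * memory type uninterruptibly (second argument false), tear down the
 * buffer's CPU mappings while it is held, then drop the lock.
 */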
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
Example #2
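/*
 * Final-release path: after the address-space node is removed, the io
 * lock is taken uninterruptibly so the BO's io-space VM reservation can
 * be freed (ttm_mem_io_free_vm) before the remaining cleanup is queued.
 */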
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}
Example #3
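/*
 * Interruptible variant: ttm_mem_io_lock(old_man, true) can fail, so its
 * return value is checked before the virtual mappings are torn down ahead
 * of a move that changes the PCI or caching state of the buffer.
 */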
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
Example #4
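/*
 * FreeBSD page-fault handler: the io lock is taken interruptibly and any
 * failure aborts the fault with VM_PAGER_ERROR; the lock is held across
 * ttm_mem_io_reserve_vm() and while the faulted page is resolved and
 * inserted into the VM object.
 */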
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = PHYS_TO_VM_PAGE(bo->mem.bus.base + bo->mem.bus.offset +
		    offset);
		KASSERT((m->flags & PG_FICTITIOUS) != 0,
		    ("physical address %#jx not fictitious",
		    (uintmax_t)(bo->mem.bus.base + bo->mem.bus.offset
		    + offset)));
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if (vm_page_busied(m)) {
		vm_page_lock(m);
		VM_OBJECT_WUNLOCK(vm_obj);
		vm_page_busy_sleep(m, "ttmpbs");
		VM_OBJECT_WLOCK(vm_obj);
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
			VM_OBJECT_WUNLOCK(vm_obj);
			VM_WAIT;
			VM_OBJECT_WLOCK(vm_obj);
			ttm_mem_io_unlock(man);
			ttm_bo_unreserve(bo);
			goto retry;
		}
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_xbusy(m);

	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}