Example #1
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		/* Detach the page the VM handed us; we replace it below. */
		vm_page_remove(oldm);
		*mres = NULL;
	}
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		/*
		 * The only failure this blocking, non-interruptible
		 * reserve is expected to return is -EBUSY; yield and
		 * retry until the reservation succeeds.
		 */
		if (ret == -EBUSY) {
			lwkt_yield();
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			lwkt_yield();
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem, as we can't end up with an unkillable process
		 * here: the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process,
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
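		/*
		 * For comparison, roughly what the Linux ttm_bo_vm.c of
		 * this era does at this point (a sketch for illustration,
		 * not part of this port):
		 *
		 *	ret = ttm_bo_wait(bo, false, true, false);
		 *	if (unlikely(ret != 0)) {
		 *		retval = (ret != -ERESTARTSYS) ?
		 *		    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
		 *		goto out_unlock;
		 *	}
		 */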
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Populate all pages at once, the most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if ((m->flags & PG_BUSY) != 0) {
#if 0
		vm_page_sleep(m, "ttmpbs");
#endif
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy_try(m, FALSE);

	if (oldm != NULL) {
		vm_page_free(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
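
For context: a fault handler with this exact signature is what FreeBSD's device pager (an API also carried in DragonFly's drm port) expects in the cdev_pg_fault slot of struct cdev_pager_ops. Below is a minimal sketch of how such a handler gets wired up; the ctor/dtor stubs and the ttm_bo_mmap_single() wrapper are hypothetical names for illustration, not code from the example above.

static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	/* A real ctor would take a reference on the buffer object. */
	*color = 0;
	return (0);
}

static void
ttm_bo_vm_dtor(void *handle)
{
	/* A real dtor would drop the reference taken in the ctor. */
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor,
	.cdev_pg_fault = ttm_bo_vm_fault,
};

/* Hypothetical d_mmap_single-style entry point: the handle passed here
 * is what the fault handler later reads back from vm_obj->handle. */
static int
ttm_bo_mmap_single(struct ttm_buffer_object *bo, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **obj_res, int nprot)
{
	vm_object_t vm_obj;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, *offset, curthread->td_ucred);
	if (vm_obj == NULL)
		return (EINVAL);
	*obj_res = vm_obj;
	return (0);
}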
Example #2
static int
pscnv_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct drm_gem_object *gem_obj = vm_obj->handle;
	struct pscnv_bo *bo = gem_obj->driver_private;
	struct drm_device *dev = gem_obj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	vm_page_t m = NULL;
	vm_page_t oldm;
	vm_memattr_t mattr;
	vm_paddr_t paddr;
	const char *what;

	/*
	 * Bounds-check the fault offset before any address computation:
	 * the sysram case below indexes bo->dmapages with it.
	 */
	if (offset >= bo->size) {
		if (pscnv_mem_debug > 0)
			NV_WARN(dev, "Reading %p + %08llx is past max size %08llx\n",
				bo, offset, bo->size);
		return (VM_PAGER_ERROR);
	}

	if (bo->chan) {
		paddr = dev_priv->fb_phys + offset +
			nvc0_fifo_ctrl_offs(dev, bo->chan->cid);
		mattr = VM_MEMATTR_UNCACHEABLE;
		what = "fifo";
	} else switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		paddr = dev_priv->fb_phys + bo->map1->start + offset;
		mattr = VM_MEMATTR_WRITE_COMBINING;
		what = "vram";
		break;
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		paddr = bo->dmapages[OFF_TO_IDX(offset)];
		mattr = VM_MEMATTR_WRITE_BACK;
		what = "sysram";
		break;
	default:
		/* Pager handlers report failure with VM_PAGER_* codes. */
		return (VM_PAGER_ERROR);
	}
	DRM_LOCK(dev);
	if (pscnv_mem_debug > 0)
		NV_WARN(dev, "Connecting %p+%08llx (%s) at phys %010llx\n",
			bo, offset, what, paddr);
	vm_object_pip_add(vm_obj, 1);

	if (*mres != NULL) {
		oldm = *mres;
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
	//VM_OBJECT_LOCK(vm_obj);
	m = vm_phys_fictitious_to_vm_page(paddr);
	if (m == NULL) {
		DRM_UNLOCK(dev);
		/* Drop the pip reference taken above and report a pager
		 * status, not an errno value. */
		vm_object_pip_wakeup(vm_obj);
		return (VM_PAGER_ERROR);
	}
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", m));
	KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m));

	if ((m->oflags & VPO_BUSY) != 0) {
		DRM_UNLOCK(dev);
		vm_object_pip_wakeup(vm_obj);
		return (VM_PAGER_ERROR);
	}
	pmap_page_set_memattr(m, mattr);
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	vm_page_lock(m);
	vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	vm_page_unlock(m);
	vm_page_busy(m);

	printf("fault %p %jx %x phys %x", gem_obj, offset, prot,
	    m->phys_addr);
	DRM_UNLOCK(dev);
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}
	vm_object_pip_wakeup(vm_obj);
	return (VM_PAGER_OK);
}
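
Both examples lean on vm_phys_fictitious_to_vm_page() to turn a device physical address into a vm_page_t, which only succeeds if the driver registered that physical range as fictitious pages beforehand. Below is a minimal sketch of that setup, assuming FreeBSD's vm_phys_fictitious_reg_range()/vm_phys_fictitious_unreg_range() and a hypothetical fb_size field alongside fb_phys.

static int
pscnv_register_aperture(struct drm_nouveau_private *dev_priv)
{
	/*
	 * Register the VRAM aperture as fictitious pages so that later
	 * faults can translate a paddr into a vm_page_t. fb_size is a
	 * stand-in for however the driver records the aperture length.
	 */
	return (vm_phys_fictitious_reg_range(dev_priv->fb_phys,
	    dev_priv->fb_phys + dev_priv->fb_size,
	    VM_MEMATTR_WRITE_COMBINING));
}

static void
pscnv_unregister_aperture(struct drm_nouveau_private *dev_priv)
{
	/* The range must be dropped again on device detach. */
	vm_phys_fictitious_unreg_range(dev_priv->fb_phys,
	    dev_priv->fb_phys + dev_priv->fb_size);
}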