Example #1
/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (i = 0; i < count; i++) {
		if (m[i]->valid == 0) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			m[i]->valid = VM_PAGE_BITS_ALL;
		}
		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
		    ("phys_pager_getpages: partially valid page %p", m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("phys_pager_getpages: dirty page %p", m[i]));
		/* The requested page must remain busy, the others not. */
		if (i == reqpage) {
			vm_page_lock(m[i]);
			vm_page_flash(m[i]);
			vm_page_unlock(m[i]);
		} else
			vm_page_xunbusy(m[i]);
	}
	return (VM_PAGER_OK);
}
Example #2
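/*
 * Read the TTM's pages back from its swap object into ttm->pages.
 * The swap object is dropped afterwards unless it is marked persistent.
 */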
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i, ret, rv;

	obj = ttm->swap_storage;

	VM_OBJECT_WLOCK(obj);
	vm_object_pip_add(obj, 1);
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
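		/*
		 * If the grabbed page is not fully valid, page it in from
		 * the pager, or zero-fill it if the pager has no copy.
		 */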
		if (from_page->valid != VM_PAGE_BITS_ALL) {
			if (vm_pager_has_page(obj, i, NULL, NULL)) {
				rv = vm_pager_get_pages(obj, &from_page, 1, 0);
				if (rv != VM_PAGER_OK) {
					vm_page_lock(from_page);
					vm_page_free(from_page);
					vm_page_unlock(from_page);
					ret = -EIO;
					goto err_ret;
				}
			} else
				vm_page_zero_invalid(from_page, TRUE);
		}
		vm_page_xunbusy(from_page);
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto err_ret;
		}
		pmap_copy_page(from_page, to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_WUNLOCK(obj);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		vm_object_deallocate(obj);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
	return (0);

err_ret:
	vm_object_pip_wakeup(obj);
	VM_OBJECT_WUNLOCK(obj);
	return (ret);
}
Example #3
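/*
 * Copy the TTM's resident pages into a swap-backed VM object (a freshly
 * allocated one unless persistent storage is supplied), then unpopulate
 * the TTM and mark it as swapped.
 */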
int ttm_tt_swapout(struct ttm_tt *ttm, vm_object_t persistent_swap_storage)
{
	vm_object_t obj;
	vm_page_t from_page, to_page;
	int i;

	MPASS(ttm->state == tt_unbound || ttm->state == tt_unpopulated);
	MPASS(ttm->caching_state == tt_cached);

	if (persistent_swap_storage == NULL) {
		obj = vm_pager_allocate(OBJT_SWAP, NULL,
		    IDX_TO_OFF(ttm->num_pages), VM_PROT_DEFAULT, 0,
		    curthread->td_ucred);
		if (obj == NULL) {
			printf("[TTM] Failed allocating swap storage\n");
			return (-ENOMEM);
		}
	} else
		obj = persistent_swap_storage;

	VM_OBJECT_WLOCK(obj);
	vm_object_pip_add(obj, 1);
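	/*
	 * Copy each resident TTM page into the swap object and dirty it
	 * so it will eventually be written out to swap.
	 */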
	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = vm_page_grab(obj, i, VM_ALLOC_NORMAL);
		pmap_copy_page(from_page, to_page);
		to_page->valid = VM_PAGE_BITS_ALL;
		vm_page_dirty(to_page);
		vm_page_xunbusy(to_page);
	}
	vm_object_pip_wakeup(obj);
	VM_OBJECT_WUNLOCK(obj);

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	ttm->swap_storage = obj;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage != NULL)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
	return (0);
}
Example #4
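/*
 * Map the first page of an executable image into the kernel so the
 * image activator can examine its header; nearby pages may be read in
 * by the same pager request.
 */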
int
exec_map_first_page(struct image_params *imgp)
{
	int rv, i, after, initial_pagein;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	vm_object_t object;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
	VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
	vm_object_color(object, 0);
#endif
	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(ma[0]);
		if (!vm_pager_has_page(object, 0, NULL, &after)) {
			vm_page_lock(ma[0]);
			vm_page_free(ma[0]);
			vm_page_unlock(ma[0]);
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		initial_pagein = min(after, VM_INITIAL_PAGEIN);
		KASSERT(initial_pagein <= object->size,
		    ("%s: initial_pagein %d object->size %ju",
		    __func__, initial_pagein, (uintmax_t)object->size));
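		/*
		 * Gather subsequent invalid pages, resident or newly
		 * allocated, so they can be read in with one pager call.
		 */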
		for (i = 1; i < initial_pagein; i++) {
			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
				if (ma[i]->valid)
					break;
				if (!vm_page_tryxbusy(ma[i]))
					break;
			} else {
				ma[i] = vm_page_alloc(object, i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		initial_pagein = i;
		rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			for (i = 0; i < initial_pagein; i++) {
				vm_page_lock(ma[i]);
				vm_page_free(ma[i]);
				vm_page_unlock(ma[i]);
			}
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		vm_page_xunbusy(ma[0]);
		for (i = 1; i < initial_pagein; i++)
			vm_page_readahead_finish(ma[i]);
	}
	vm_page_lock(ma[0]);
	vm_page_hold(ma[0]);
	vm_page_activate(ma[0]);
	vm_page_unlock(ma[0]);
	VM_OBJECT_WUNLOCK(object);

	imgp->firstpage = sf_buf_alloc(ma[0], 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}
Example #5
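/*
 * Grow or shrink a shared memory object.  Shrinking zeroes the
 * truncated part of the last page and discards pages beyond the new
 * end from memory and swap; growing only reserves additional swap.
 */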
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_WLOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_WUNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if (vm_page_sleep_if_busy(m, "shmtrc"))
					goto retry;
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_WUNLOCK(object);
					VM_WAIT;
					VM_OBJECT_WLOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_xunbusy(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_WUNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_WUNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
	return (0);
}
Example #6
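/*
 * Move up to one page of data between the shm object and the uio,
 * holding the page across the copy while the object lock is dropped.
 */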
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
	 * type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, 0);
			m = vm_page_lookup(obj, idx);
			if (m == NULL) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
				    obj, idx, rv);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);

	return (error);
}