Example #1
void
rumptest_busypage(void)
{
	struct lwp *newl;
	int rv;

	cv_init(&tcv, "napina");

	/* create an anonymous UVM object and allocate its first page (returned busy) */
	uobj = uao_create(1, 0);
	mutex_enter(uobj->vmobjlock);
	testpg = uvm_pagealloc(uobj, 0, NULL, 0);
	mutex_exit(uobj->vmobjlock);
	if (testpg == NULL)
		panic("couldn't create vm page");

	/* start a joinable, MPSAFE helper thread; we kthread_join() it below */
	rv = kthread_create(PRI_NONE, KTHREAD_MUSTJOIN | KTHREAD_MPSAFE, NULL,
	    thread, NULL, &newl, "jointest");
	if (rv)
		panic("thread creation failed: %d", rv);

	/* wait for the helper thread to signal that it is running */
	mutex_enter(uobj->vmobjlock);
	while (!threadrun)
		cv_wait(&tcv, uobj->vmobjlock);

	/* unbusy the test page while still holding the object lock */
	uvm_page_unbusy(&testpg, 1);
	mutex_exit(uobj->vmobjlock);

	/* wait for the helper thread to exit */
	rv = kthread_join(newl);
	if (rv)
		panic("thread join failed: %d", rv);
}
Example #2
int
uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end,
    struct pglist *pageq)
{
	int i, npages, left, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	voff_t offset = start;

	left = (end - start) >> PAGE_SHIFT;

	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			PROT_READ | PROT_WRITE, MADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

			/* aobj page: mark it dirty and release its swap slot (wired pages stay resident) */
			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (voff_t)npages << PAGE_SHIFT;
	}

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
Example #3
/*
 * uvm_loanpage: loan out pages to kernel (->K)
 *
 * => pages should be object-owned and the object should be locked.
 * => in the case of error, the object might be unlocked and relocked.
 * => caller should busy the pages beforehand.
 * => pages will be unbusied.
 * => fail with EBUSY if we meet a wired page.
 */
static int
uvm_loanpage(struct vm_page **pgpp, int npages)
{
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgpp[i];

		KASSERT(pg->uobject != NULL);
		KASSERT(pg->uobject == pgpp[0]->uobject);
		KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
		KASSERT(mutex_owned(&pg->uobject->vmobjlock));
		KASSERT(pg->flags & PG_BUSY);

		mutex_enter(&uvm_pageqlock);
		if (pg->wire_count > 0) {
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
			error = EBUSY;
			break;
		}
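		/* first loan of this page: write-protect all existing mappings */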
		if (pg->loan_count == 0) {
			pmap_page_protect(pg, VM_PROT_READ);
		}
		pg->loan_count++;
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
	}

	uvm_page_unbusy(pgpp, npages);

	if (error) {
		/*
		 * backout what we've done
		 */
		kmutex_t *slock = &pgpp[0]->uobject->vmobjlock;

		mutex_exit(slock);
		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
		mutex_enter(slock);
	}

	UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
	return error;
}
Example #4
/*
 * uvm_loanuobjpages: loan pages from a uobj out (O->K)
 *
 * => uobj shouldn't be locked.  (we'll lock it)
 * => fail with EBUSY if we meet a wired page.
 */
int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    struct vm_page **origpgpp)
{
	int ndone; /* # of pages loaned out */
	struct vm_page **pgpp;
	int error;
	int i;
	kmutex_t *slock;

	pgpp = origpgpp;
	for (ndone = 0; ndone < orignpages; ) {
		int npages;
		/* npendloan: # of pages busied but not loaned out yet. */
		int npendloan = 0xdead; /* XXX gcc */
reget:
		npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
		mutex_enter(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj,
		    pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
		    VM_PROT_READ, 0, PGO_SYNCIO);
		if (error == EAGAIN) {
			tsleep(&lbolt, PVM, "nfsread", 0);
			continue;
		}
		if (error)
			goto fail;

		KASSERT(npages > 0);

		/* loan and unbusy pages */
		slock = NULL;
		for (i = 0; i < npages; i++) {
			kmutex_t *nextslock; /* slock for next page */
			struct vm_page *pg = *pgpp;

			/* XXX assuming that the page is owned by uobj */
			KASSERT(pg->uobject != NULL);
			nextslock = &pg->uobject->vmobjlock;

			if (slock != nextslock) {
				if (slock) {
					KASSERT(npendloan > 0);
					error = uvm_loanpage(pgpp - npendloan,
					    npendloan);
					mutex_exit(slock);
					if (error)
						goto fail;
					ndone += npendloan;
					KASSERT(origpgpp + ndone == pgpp);
				}
				slock = nextslock;
				npendloan = 0;
				mutex_enter(slock);
			}

			if ((pg->flags & PG_RELEASED) != 0) {
				/*
				 * release pages and try again.
				 */
				mutex_exit(slock);
				for (; i < npages; i++) {
					pg = pgpp[i];
					slock = &pg->uobject->vmobjlock;

					mutex_enter(slock);
					mutex_enter(&uvm_pageqlock);
					uvm_page_unbusy(&pg, 1);
					mutex_exit(&uvm_pageqlock);
					mutex_exit(slock);
				}
				goto reget;
			}

			npendloan++;
			pgpp++;
			KASSERT(origpgpp + ndone + npendloan == pgpp);
		}
		KASSERT(slock != NULL);
		KASSERT(npendloan > 0);
		error = uvm_loanpage(pgpp - npendloan, npendloan);
		mutex_exit(slock);
		if (error)
			goto fail;
		ndone += npendloan;
		KASSERT(origpgpp + ndone == pgpp);
	}

	return 0;

fail:
	uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);

	return error;
}
Example #5
int
ufs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ufs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
		    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	mutex_enter(&uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0,
	    PGO_SYNCIO|PGO_PASTEOF|PGO_NOBLOCKALLOC|PGO_NOTIMESTAMP);
	if (error) {
		goto out;
	}
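	/*
	 * VOP_GETPAGES returns with the object unlocked, so re-take
	 * vmobjlock (and the page queue lock) before touching the pages.
	 */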
	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		UVMHIST_LOG(ubchist, "got pgs[%d] %p", i, pgs[i],0,0);
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		pgs[i]->flags &= ~PG_CLEAN;
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	/*
	 * now allocate the range.
	 */

	genfs_node_wrlock(vp);
	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * clear PG_RDONLY on any pages we are holding
	 * (since they now have backing store) and unbusy them.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	mutex_enter(&uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		if (error) {
			pgs[i]->flags |= PG_RELEASED;
		} else if (off <= pagestart + (i << PAGE_SHIFT) &&
		    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
			pgs[i]->flags &= ~PG_RDONLY;
		}
	}
	if (error) {
		mutex_enter(&uvm_pageqlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
	} else {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uobj->vmobjlock);

 out:
 	kmem_free(pgs, pgssize);
	return error;
}
Example #6
int
uvm_objwire(struct uvm_object *uobj, off_t start, off_t end,
    struct pglist *pageq)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT];
	off_t offset = start, left;

	left = (end - start) >> PAGE_SHIFT;

	mtx_enter(&uobj->vmobjlock);
	while (left) {

		npages = MIN(FETCH_PAGECOUNT, left);

		/* Get the pages */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mtx_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->pg_flags & PG_RELEASED));

#if 0
			/*
			 * Loan break
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mtx_leave(&uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mtx_enter(&uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}
#endif

			/* aobj page: mark it dirty and release its swap slot (wired pages stay resident) */
			if (pgs[i]->pg_flags & PQ_AOBJ) {
				atomic_clearbits_int(&pgs[i]->pg_flags,
				    PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		uvm_lock_pageq();
		for (i = 0; i < npages; i++) {
			uvm_pagewire(pgs[i]);
			if (pageq != NULL)
				TAILQ_INSERT_TAIL(pageq, pgs[i], pageq);
		}
		uvm_unlock_pageq();

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += (off_t)npages << PAGE_SHIFT;
	}
	mtx_leave(&uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which have been wired */
	uvm_objunwire(uobj, start, offset);

	return error;
}
Example #7
int
ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
		    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	genfs_node_wrlock(vp);
	mutex_enter(uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP | PGO_GLOCKHELD);
	if (error) {
		goto out;
	}

	/*
	 * now allocate the range.
	 */

	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * if the allocation succeeded, clear PG_CLEAN on all the pages
	 * and clear PG_RDONLY on any pages that are now fully backed
	 * by disk blocks.  if the allocation failed, we do not invalidate
	 * the pages since they might have already existed and been dirty,
	 * in which case we need to keep them around.  if we created the pages,
	 * they will be clean and read-only, and leaving such pages
	 * in the cache won't cause any problems.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
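	/* re-take the object lock, which the getpages call released */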
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		if (!error) {
			if (off <= pagestart + (i << PAGE_SHIFT) &&
			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
				pgs[i]->flags &= ~PG_RDONLY;
			}
			pgs[i]->flags &= ~PG_CLEAN;
		}
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

 out:
 	kmem_free(pgs, pgssize);
	return error;
}