Code example #1
/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * smbfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		  int a_reqpage, vm_ooffset_t a_offset)
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vop_stdgetpages(ap);
#else
	int i, error, npages;
	int doclose;
	size_t size, toff, nextoff, count;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);

	vp = ap->a_vp;
	cred = td->td_proc->p_ucred;
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}
	smb_makescred(&scred, td, cred);

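	/*
	 * Borrow a pageable buffer purely for its KVA window and map the
	 * target pages into it, so one contiguous uio read can fill them
	 * all at once.
	 */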
	bp = getpbuf_kva(&smbfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the same
	 * file while we are blocked in read. XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENREAD, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("smbfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* SMBFS_RWGENERIC */
}
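
The one non-obvious step above is the partial-page case: for the page straddling the end of the data actually read, the count of valid bytes is rounded up to a DEV_BSIZE boundary before vm_page_set_validclean() marks it. A minimal standalone sketch of that rounding (userland C with DEV_BSIZE stubbed to the usual 512; not kernel code):

/*
 * Sketch: the valid-byte computation for the page straddling the end
 * of the data actually read.  DEV_BSIZE is stubbed here; in the kernel
 * it comes from the machine headers.
 */
#include <stdio.h>

#define DEV_BSIZE 512

/* Bytes of the page starting at offset 'toff' that are valid after a
 * read that returned 'size' bytes total: (size - toff) rounded up to a
 * DEV_BSIZE boundary, exactly as in the getpages loops above. */
static int
nvalid_bytes(size_t size, size_t toff)
{
	return (int)(((size + DEV_BSIZE - 1) - toff) &
		     ~(size_t)(DEV_BSIZE - 1));
}

int
main(void)
{
	/* 5000 bytes landed in two 4 KB pages: the second page
	 * (toff 4096) holds 904 bytes, rounded up to 1024. */
	printf("%d\n", nvalid_bytes(5000, 4096));	/* 1024 */
	/* Read ended exactly on the page boundary: nothing valid. */
	printf("%d\n", nvalid_bytes(4096, 4096));	/* 0 */
	return 0;
}
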
Code example #2
File: nwfs_io.c  Project: mihaicarabas/dragonfly
/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_reqpage, ap->a_seqaccess);
#else
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* NWFS_RWCACHE */
}
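
Apart from the transport call (smb_read() in example 1, ncp_read() here) and smbfs's open/close dance, the two functions are identical: map the pages behind one contiguous kernel buffer, describe it with a single iovec/uio pair, and issue one read. A rough userland analogue of that single-descriptor setup, using readv(2) as a stand-in for the in-kernel read; the file name and request size are made up for the illustration:

/*
 * Sketch: one contiguous buffer, one iovec, one read -- the userland
 * shape of the uio setup shared by smbfs_getpages() and nwfs_getpages().
 * "example.dat" and the 4-page request size are illustrative only.
 */
#include <sys/uio.h>

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PAGE_SIZE 4096

int
main(void)
{
	size_t count = 4 * PAGE_SIZE;	/* analogue of ap->a_count */
	char *buf = malloc(count);	/* stands in for the pbuf KVA window */
	struct iovec iov;
	ssize_t n;
	int fd;

	if (buf == NULL || (fd = open("example.dat", O_RDONLY)) < 0) {
		perror("setup");
		return 1;
	}

	iov.iov_base = buf;		/* iov.iov_base = (caddr_t)kva  */
	iov.iov_len = count;		/* iov.iov_len  = count         */
	n = readv(fd, &iov, 1);		/* uio_iovcnt == 1, UIO_READ    */
	if (n < 0) {
		perror("readv");
		return 1;
	}

	/* count - n plays the role of uio.uio_resid after the transfer:
	 * a short read leaves the tail pages only partially valid. */
	printf("read %zd of %zu bytes\n", n, count);

	close(fd);
	free(buf);
	return 0;
}
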
Code example #3
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 *
 * With all the caching local media devices do these days there is really
 * very little point to attempting to restrict the I/O size to contiguous
 * blocks on-disk, especially if our caller thinks we need all the specified
 * pages.  Just construct and issue a READ.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *mpp, int bytecount,
			     int reqpage, int seqaccess)
{
	struct iovec aiov;
	struct uio auio;
	off_t foff;
	int error;
	int count;
	int i;
	int ioflags;

	/*
	 * Do not do anything if the vnode is bad.
	 */
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	/*
	 * Calculate the number of pages.  Since we are paging in whole
	 * pages, adjust bytecount to be an integral multiple of the page
	 * size.  It will be clipped to the file EOF later on.
	 */
	bytecount = round_page(bytecount);
	count = bytecount / PAGE_SIZE;

	/*
	 * We could check m[reqpage]->valid here and shortcut the operation,
	 * but doing so breaks read-ahead.  Instead assume that the VM
	 * system has already done at least the check, don't worry about
	 * any races, and issue the VOP_READ to allow read-ahead to function.
	 *
	 * This keeps the pipeline full for I/O-bound, sequentially
	 * scanned mmap()s.
	 */
	/* don't shortcut */

	/*
	 * Discard pages past the file EOF.  If the requested page is past
	 * the file EOF we just leave its valid bits set to 0, the caller
	 * expects to maintain ownership of the requested page.  If the
	 * entire range is past file EOF discard everything and generate
	 * a pagein error.
	 */
	foff = IDX_TO_OFF(mpp[0]->pindex);
	if (foff >= vp->v_filesize) {
		for (i = 0; i < count; i++) {
			if (i != reqpage)
				vnode_pager_freepage(mpp[i]);
		}
		return VM_PAGER_ERROR;
	}

	if (foff + bytecount > vp->v_filesize) {
		bytecount = vp->v_filesize - foff;
		i = round_page(bytecount) / PAGE_SIZE;
		while (count > i) {
			--count;
			if (count != reqpage)
				vnode_pager_freepage(mpp[count]);
		}
	}

	/*
	 * The size of the transfer is bytecount.  bytecount will be an
	 * integral multiple of the page size unless it has been clipped
	 * to the file EOF.  The transfer cannot exceed the file EOF.
	 *
	 * When dealing with real devices we must round-up to the device
	 * sector size.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		int secmask = vp->v_rdev->si_bsize_phys - 1;
		KASSERT(secmask < PAGE_SIZE, ("vnode_pager_generic_getpages: sector size %d too large", secmask + 1));
		bytecount = (bytecount + secmask) & ~secmask;
	}

	/*
	 * Severe hack to avoid deadlocks with the buffer cache: keep each
	 * page soft-busied for I/O but drop its hard busy, so the VOP_READ
	 * path, which busies pages itself, cannot deadlock against us.
	 */
	for (i = 0; i < count; ++i) {
		vm_page_t mt = mpp[i];

		vm_page_io_start(mt);
		vm_page_wakeup(mt);
	}

	/*
	 * Issue the I/O with some read-ahead if bytecount > PAGE_SIZE
	 */
	ioflags = IO_VMIO;
	if (seqaccess)
		ioflags |= IO_SEQMAX << IO_SEQSHIFT;

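	/*
	 * UIO_NOCOPY with a NULL iov_base: the read runs through the
	 * buffer cache and the data lands in the VM object's pages
	 * directly; nothing is copied out to a separate buffer.
	 */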
	aiov.iov_base = NULL;
	aiov.iov_len = bytecount;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = bytecount;
	auio.uio_td = NULL;
	mycpu->gd_cnt.v_vnodein++;
	mycpu->gd_cnt.v_vnodepgsin += count;

	error = VOP_READ(vp, &auio, ioflags, proc0.p_ucred);

	/*
	 * Undo the hack above: re-acquire the hard busy on each page and
	 * clear its soft-busy I/O state now that the read has completed.
	 */
	for (i = 0; i < count; ++i) {
		vm_page_busy_wait(mpp[i], FALSE, "getpgs");
		vm_page_io_finish(mpp[i]);
	}

	/*
	 * Calculate the actual number of bytes read and clean up the
	 * page list.  
	 */
	bytecount -= auio.uio_resid;

	for (i = 0; i < count; ++i) {
		vm_page_t mt = mpp[i];

		if (i != reqpage) {
			if (error == 0 && mt->valid) {
				if (mt->flags & PG_REFERENCED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vnode_pager_freepage(mt);
			}
		} else if (mt->valid == 0) {
			if (error == 0) {
				kprintf("page failed but no I/O error page "
					"%p object %p pindex %d\n",
					mt, mt->object, (int) mt->pindex);
				/* whoops, something happened */
				error = EINVAL;
			}
		} else if (mt->valid != VM_PAGE_BITS_ALL) {
			/*
			 * Zero-extend the requested page if necessary (if
			 * the filesystem is using a small block size).
			 */
			vm_page_zero_invalid(mt, TRUE);
		}
	}
	if (error) {
		kprintf("vnode_pager_getpage: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}
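
The EOF handling in this generic path reduces to a little arithmetic: round the request up to whole pages, free any pages lying entirely past EOF, clip the byte count to EOF, and, for device-backed vnodes, round back up to a whole sector. A standalone sketch of those computations with stand-in values for the file size, starting offset, and sector size:

/*
 * Sketch: the request-rounding and EOF-clip arithmetic from
 * vnode_pager_generic_getpages().  The 10000-byte file, the 8192-byte
 * starting offset and the 512-byte sector are stand-in values.
 */
#include <stdio.h>

#define PAGE_SIZE 4096
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	long long filesize = 10000;	/* vp->v_filesize stand-in */
	long long foff = 8192;		/* IDX_TO_OFF(mpp[0]->pindex) */
	int bytecount = 3 * PAGE_SIZE;	/* caller asked for 3 pages */
	int count, secmask;

	bytecount = round_page(bytecount);	/* whole pages only */
	count = bytecount / PAGE_SIZE;		/* 3 */

	if (foff + bytecount > filesize) {
		/* Clip to EOF: 10000 - 8192 = 1808 bytes remain, i.e. one
		 * page; the kernel loop frees the trailing pages (except
		 * the requested one) as count drops from 3 to 1. */
		bytecount = (int)(filesize - foff);
		count = round_page(bytecount) / PAGE_SIZE;
	}

	/* Device-backed vnodes round back up to a whole sector. */
	secmask = 512 - 1;		/* si_bsize_phys - 1 */
	bytecount = (bytecount + secmask) & ~secmask;

	printf("count=%d bytecount=%d\n", count, bytecount);	/* 1 2048 */
	return 0;
}
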