/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * Caller has already cleared the pmap modified bits, if any.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		     int sync, int *rtvals)
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed XXX.
	 */
	if ((vmstats.v_free_count + vmstats.v_cache_count) <
	    vmstats.v_pageout_free_min) {
		sync |= OBJPC_SYNC;
	}

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	if (rtval == EOPNOTSUPP) {
		kprintf("vnode_pager: *** WARNING *** stale FS putpages\n");
		rtval = vnode_pager_generic_putpages(vp, m, bytes, sync,
						     rtvals);
	}
}
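/*
 * Hedged sketch, not part of the original source: one plausible way a
 * caller (e.g. the pageout code) drives this path and consumes the
 * per-page status array.  vm_pager_put_pages() dispatches to
 * vnode_pager_putpages() for vnode-backed objects.  EXAMPLE_CLUSTER
 * and example_flush_cluster() are hypothetical names used only for
 * illustration.
 */
#define EXAMPLE_CLUSTER	16

static void
example_flush_cluster(vm_object_t object, vm_page_t *ma, int count)
{
	int rtvals[EXAMPLE_CLUSTER];
	int i;

	KKASSERT(count <= EXAMPLE_CLUSTER);

	/*
	 * Synchronous flush; the pager fills in one status code per page
	 * (VM_PAGER_OK, VM_PAGER_AGAIN, VM_PAGER_ERROR, ...).
	 */
	vm_pager_put_pages(object, ma, count, OBJPC_SYNC, rtvals);

	for (i = 0; i < count; i++) {
		if (rtvals[i] == VM_PAGER_OK)
			continue;	/* written; pager undirtied it */
		if (rtvals[i] == VM_PAGER_AGAIN)
			continue;	/* transient; page stays dirty, retry */
		/* anything else: hard error, page remains dirty */
	}
}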
/*
 * unionfs putpages: hand the pages to the generic vnode pager.
 */
static int
union_putpages(struct vop_putpages_args *ap)
{
	int r;

	r = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
					 ap->a_sync, ap->a_rtvals);
	return (r);
}
/*
 * Default VOP_PUTPAGES: use the generic vnode pager if the vnode is
 * still associated with a mount point, otherwise fail the request.
 */
int
vop_stdputpages(struct vop_putpages_args *ap)
{
	struct mount *mp;
	int error;

	if ((mp = ap->a_vp->v_mount) != NULL) {
		error = vnode_pager_generic_putpages(
				ap->a_vp, ap->a_m, ap->a_count,
				ap->a_sync, ap->a_rtvals);
	} else {
		error = VM_PAGER_BAD;
	}
	return (error);
}
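/*
 * Hedged sketch, not from the original source: a local-media
 * filesystem opts in to the generic path by pointing its putpages
 * entry at vop_stdputpages().  "examplefs_vnode_vops" is a
 * hypothetical name; the field layout assumes a DragonFly-style
 * struct vop_ops.
 */
struct vop_ops examplefs_vnode_vops = {
	.vop_default =		vop_defaultop,
	.vop_putpages =		vop_stdputpages,
	/* ... remaining examplefs vnode operations ... */
};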
/*
 * Vnode op for VM putpages.
 * possible bug: all I/O is done in sync mode
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 *
 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nwfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;	/* XXX */
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
					     ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;	/* XXX */

/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	/* Assume failure until the write actually succeeds. */
	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/* Map the pages into a pageout buffer's KVA for the uio write. */
	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;

		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* NWFS_RWCACHE */
}