Example #1
/* Unmount the filesystem described by mp. */
static int
nwfs_unmount(struct mount *mp, int mntflags)
{
	struct nwmount *nmp = VFSTONWFS(mp);
	struct ncp_conn *conn;
	int error, flags;

	NCPVODEBUG("nwfs_unmount: flags=%04x\n",mntflags);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* There is 1 extra root vnode reference from nwfs_mount(). */
	error = vflush(mp, 1, flags);
	if (error)
		return (error);
	conn = NWFSTOCONN(nmp);
	ncp_conn_puthandle(nmp->connh,NULL,0);
	if (ncp_conn_lock(conn, curthread, proc0.p_ucred, NCPM_WRITE | NCPM_EXECUTE) == 0) {
		if(ncp_disconnect(conn))
			ncp_conn_unlock(conn, curthread);
	}
	mp->mnt_data = (qaddr_t)0;
	if (nmp->m.flags & NWFS_MOUNT_HAVE_NLS)
		kfree(nmp->m.nls.to_lower, M_NWFSDATA);
	kfree(nmp, M_NWFSDATA);
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}
Example #2
/* Read data for a regular file or directory vnode over the NCP connection. */
int
nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct nwmount *nmp = VFSTONWFS(vp->v_mount);
	struct nwnode *np = VTONW(vp);
	struct vattr vattr;
	int error;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		kprintf("%s: vn types other than VREG or VDIR are unsupported !\n",__func__);
		return EIO;
	}
	if (uiop->uio_resid == 0) return 0;
	if (uiop->uio_offset < 0) return EINVAL;
	if (vp->v_type == VDIR) {
		error = nwfs_readvdir(vp, uiop, cred);
		return error;
	}
	if (np->n_flag & NMODIFIED) {
		nwfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr);
		if (error) return (error);
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr);
		if (error) return (error);
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			error = nwfs_vinvalbuf(vp, V_SAVE, 1);
			if (error) return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop,cred);
	return (error);
}
Example #3
/*
 * nwfs_statfs call
 */
int
nwfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred)
{
	struct nwmount *nmp = VFSTONWFS(mp);
	int error = 0, secsize;
	struct nwnode *np = nmp->n_root;
	struct ncp_volume_info vi;

	if (np == NULL) return EINVAL;
	error = ncp_get_volume_info_with_number(NWFSTOCONN(nmp), nmp->n_volume,
						&vi, curthread, cred);
	if (error) return error;
	secsize = 512;			/* XXX how to get real value ??? */
	sbp->f_spare2=0;		/* placeholder */
	/* fundamental file system block size */
	sbp->f_bsize = vi.sectors_per_block*secsize;
	/* optimal transfer block size */
	sbp->f_iosize = NWFSTOCONN(nmp)->buffer_size;
	/* total data blocks in file system */
	sbp->f_blocks= vi.total_blocks;
	/* free blocks in fs */
	sbp->f_bfree = vi.free_blocks + vi.purgeable_blocks;
	/* free blocks avail to non-superuser */
	sbp->f_bavail= vi.free_blocks+vi.purgeable_blocks;
	/* total file nodes in file system */
	sbp->f_files = vi.total_dir_entries;
	/* free file nodes in fs */
	sbp->f_ffree = vi.available_dir_entries;
	sbp->f_flags = 0;		/* copy of mount exported flags */
	if (sbp != &mp->mnt_stat) {
		sbp->f_fsid = mp->mnt_stat.f_fsid;	/* file system id */
		sbp->f_owner = mp->mnt_stat.f_owner;	/* user that mounted the filesystem */
		sbp->f_type = mp->mnt_vfc->vfc_typenum;	/* type of filesystem */
		bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
	return 0;
}
Example #4
/*  Return locked vnode to root of a filesystem */
static int
nwfs_root(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	struct nwmount *nmp;
	struct nwnode *np;
	struct ncp_conn *conn;
	struct nw_entry_info fattr;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	int error, nsf, opt;
	u_char vol;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	nmp = VFSTONWFS(mp);
	conn = NWFSTOCONN(nmp);
	if (nmp->n_root) {
		*vpp = NWTOV(nmp->n_root);
		while (vget(*vpp, LK_EXCLUSIVE) != 0) /* XXX */
			;
		return 0;
	}
	error = ncp_lookup_volume(conn, nmp->m.mounted_vol, &vol, 
		&nmp->n_rootent.f_id, td, cred);
	if (error)
		return ENOENT;
	nmp->n_volume = vol;
	error = ncp_get_namespaces(conn, vol, &nsf, td, cred);
	if (error)
		return ENOENT;
	if (nsf & NW_NSB_OS2) {
		NCPVODEBUG("volume %s has os2 namespace\n",nmp->m.mounted_vol);
		if ((nmp->m.flags & NWFS_MOUNT_NO_OS2) == 0) {
			nmp->name_space = NW_NS_OS2;
			nmp->m.nls.opt &= ~NWHP_DOS;
		}
	}
	opt = nmp->m.nls.opt;
	nsf = opt & (NWHP_UPPER | NWHP_LOWER);
	if (opt & NWHP_DOS) {
		if (nsf == (NWHP_UPPER | NWHP_LOWER)) {
			nmp->m.nls.opt &= ~(NWHP_LOWER | NWHP_UPPER);
		} else if (nsf == 0) {
			nmp->m.nls.opt |= NWHP_LOWER;
		}
	} else {
		if (nsf == (NWHP_UPPER | NWHP_LOWER)) {
			nmp->m.nls.opt &= ~(NWHP_LOWER | NWHP_UPPER);
		}
	}
	if (nmp->m.root_path[0]) {
		nmp->m.root_path[0]--;
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    -nmp->m.root_path[0], nmp->m.root_path, &fattr, td, cred);
		if (error) {
			NCPFATAL("Invalid root path specified\n");
			return ENOENT;
		}
		nmp->n_rootent.f_parent = fattr.dirEntNum;
		nmp->m.root_path[0]++;
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    -nmp->m.root_path[0], nmp->m.root_path, &fattr, td, cred);
		if (error) {
			NCPFATAL("Invalid root path specified\n");
			return ENOENT;
		}
		nmp->n_rootent.f_id = fattr.dirEntNum;
	} else {
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    0, NULL, &fattr, td, cred);
		if (error) {
			NCPFATAL("Can't obtain volume info\n");
			return ENOENT;
		}
		fattr.nameLen = strlen(strcpy(fattr.entryName, NWFS_ROOTVOL));
		nmp->n_rootent.f_parent = nmp->n_rootent.f_id;
	}
	error = nwfs_nget(mp, nmp->n_rootent, &fattr, NULL, &vp);
	if (error)
		return (error);
	vsetflags(vp, VROOT);
	np = VTONW(vp);
	if (nmp->m.root_path[0] == 0)
		np->n_flag |= NVOLUME;
	nmp->n_root = np;
/*	error = VOP_GETATTR(vp, &vattr);
	if (error) {
		vput(vp);
		NCPFATAL("Can't get root directory entry\n");
		return error;
	}*/
	*vpp = vp;
	return (0);
}
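
Examples #1, #3, and #4 are the mount-level (VFS) operations of nwfs. The sketch below is not part of the examples; it only illustrates how such handlers are typically wired into a DragonFly vfsops table. The nwfs_mount and vfs_stdsync entries, the exact field set, and the VFS_SET() registration line are assumptions added for illustration.

/*
 * Hedged sketch (not from the examples above): registering the VFS-level
 * handlers.  Field names follow the usual designated-initializer style;
 * nwfs_mount, vfs_stdsync and the VFS_SET() line are assumed here.
 */
static struct vfsops nwfs_vfsops = {
	.vfs_mount =	nwfs_mount,	/* assumed counterpart to nwfs_unmount */
	.vfs_unmount =	nwfs_unmount,	/* Example #1 */
	.vfs_root =	nwfs_root,	/* Example #4 */
	.vfs_statfs =	nwfs_statfs,	/* Example #3 */
	.vfs_sync =	vfs_stdsync	/* assumed default sync handler */
};

VFS_SET(nwfs_vfsops, nwfs, VFCF_NETWORK);	/* assumed registration macro */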
Example #5
/*
 * How to keep the brain busy ...
 * Currently the lookup routine may perform two lookups for a vnode.  This
 * could be avoided by reorganizing the code.
 *
 * nwfs_lookup(struct vnode *a_dvp, struct vnode **a_vpp,
 *	       struct componentname *a_cnp)
 */
int
nwfs_lookup(struct vop_old_lookup_args *ap)
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	int flags = cnp->cn_flags;
	struct vnode *vp;
	struct nwmount *nmp;
	struct mount *mp = dvp->v_mount;
	struct nwnode *dnp, *npp;
	struct nw_entry_info fattr, *fap;
	ncpfid fid;
	int nameiop=cnp->cn_nameiop;
	int lockparent, wantparent, error = 0, notfound;
	struct thread *td = cnp->cn_td;
	char _name[cnp->cn_namelen+1];
	bcopy(cnp->cn_nameptr,_name,cnp->cn_namelen);
	_name[cnp->cn_namelen]=0;
	
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	if ((flags & CNP_ISDOTDOT) && (dvp->v_flag & VROOT)) {
		kprintf("nwfs_lookup: invalid '..'\n");
		return EIO;
	}

	NCPVNDEBUG("%d '%s' in '%s' id=d\n", nameiop, _name, 
		VTONW(dvp)->n_name/*, VTONW(dvp)->n_name*/);

	if ((mp->mnt_flag & MNT_RDONLY) && nameiop != NAMEI_LOOKUP)
		return (EROFS);
	if ((error = VOP_EACCESS(dvp, VEXEC, cnp->cn_cred)))
		return (error);
	lockparent = flags & CNP_LOCKPARENT;
	wantparent = flags & (CNP_LOCKPARENT | CNP_WANTPARENT);
	nmp = VFSTONWFS(mp);
	dnp = VTONW(dvp);
/*
kprintf("dvp %d:%d:%d\n", (int)mp, (int)dvp->v_flag & VROOT, (int)flags & CNP_ISDOTDOT);
*/
	error = ncp_pathcheck(cnp->cn_nameptr, cnp->cn_namelen, &nmp->m.nls, 
	    (nameiop == NAMEI_CREATE || nameiop == NAMEI_RENAME) && (nmp->m.nls.opt & NWHP_NOSTRICT) == 0);
	if (error) 
	    return ENOENT;

	error = 0;
	*vpp = NULLVP;
	fap = NULL;
	if (flags & CNP_ISDOTDOT) {
		if (NWCMPF(&dnp->n_parent, &nmp->n_rootent)) {
			fid = nmp->n_rootent;
			fap = NULL;
			notfound = 0;
		} else {
			error = nwfs_lookupnp(nmp, dnp->n_parent, td, &npp);
			if (error) {
				return error;
			}
			fid = dnp->n_parent;
			fap = &fattr;
			/*np = *npp;*/
			notfound = ncp_obtain_info(nmp, npp->n_dosfid,
			    0, NULL, fap, td, cnp->cn_cred);
		}
	} else {
		fap = &fattr;
		notfound = ncp_lookup(dvp, cnp->cn_namelen, cnp->cn_nameptr,
			fap, td, cnp->cn_cred);
		fid.f_id = fap->dirEntNum;
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
			fid.f_parent = dnp->n_fid.f_parent;
		} else
			fid.f_parent = dnp->n_fid.f_id;
		NCPVNDEBUG("call to ncp_lookup returned=%d\n",notfound);
	}
	if (notfound && notfound < 0x80 )
		return (notfound);	/* hard error */
	if (notfound) { /* entry not found */
		/* Handle RENAME or CREATE case... */
		if ((nameiop == NAMEI_CREATE || nameiop == NAMEI_RENAME) && wantparent) {
			if (!lockparent)
				vn_unlock(dvp);
			return (EJUSTRETURN);
		}
		return ENOENT;
	}/* else {
		NCPVNDEBUG("Found entry %s with id=%d\n", fap->entryName, fap->dirEntNum);
	}*/
	/* handle DELETE case ... */
	if (nameiop == NAMEI_DELETE) { 	/* delete last component */
		error = VOP_EACCESS(dvp, VWRITE, cnp->cn_cred);
		if (error) return (error);
		if (NWCMPF(&dnp->n_fid, &fid)) {	/* we found ourselves */
			vref(dvp);
			*vpp = dvp;
			return 0;
		}
		error = nwfs_nget(mp, fid, fap, dvp, &vp);
		if (error) return (error);
		*vpp = vp;
		if (!lockparent) vn_unlock(dvp);
		return (0);
	}
	if (nameiop == NAMEI_RENAME && wantparent) {
		error = VOP_EACCESS(dvp, VWRITE, cnp->cn_cred);
		if (error) return (error);
		if (NWCMPF(&dnp->n_fid, &fid)) return EISDIR;
		error = nwfs_nget(mp, fid, fap, dvp, &vp);
		if (error) return (error);
		*vpp = vp;
		if (!lockparent)
			vn_unlock(dvp);
		return (0);
	}
	if (flags & CNP_ISDOTDOT) {
		vn_unlock(dvp);	/* race to get the inode */
		error = nwfs_nget(mp, fid, NULL, NULL, &vp);
		if (error) {
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
			return (error);
		}
		if (lockparent) {
			error = vn_lock(dvp, LK_EXCLUSIVE | LK_FAILRECLAIM);
			if (error) {
				vput(vp);
				return (error);
			}
		}
		*vpp = vp;
	} else if (NWCMPF(&dnp->n_fid, &fid)) {
		vref(dvp);
		*vpp = dvp;
	} else {
		error = nwfs_nget(mp, fid, fap, dvp, &vp);
		if (error) return (error);
		*vpp = vp;
		NCPVNDEBUG("lookup: getnewvp!\n");
		if (!lockparent)
			vn_unlock(dvp);
	}
#if 0
	/* XXX MOVE TO NREMOVE */
	if ((cnp->cn_flags & CNP_MAKEENTRY)) {
		VTONW(*vpp)->n_ctime = VTONW(*vpp)->n_vattr.va_ctime.tv_sec;
		/* XXX */
	}
#endif
	return (0);
}
Example #6
/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in synchronous mode.
 * Note that vop_close always invalidates pages before close, so it's
 * not necessary to open the vnode.
 *
 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nwfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
		ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;		/* XXX */

/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n",(int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* NWFS_RWCACHE */
}
Example #7
/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
					    ap->a_reqpage, ap->a_seqaccess);
#else
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio,cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n",error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 *	 We do not clear it here.
		 */
		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
				      ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}
		
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results suggest that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* NWFS_RWCACHE */
}
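
Examples #5 through #7 are per-vnode operations. As with the vfsops sketch above, the table below is an illustration, not part of the examples: the struct vop_ops field names and the vop_defaultop fallback are assumed from DragonFly's vnode-op convention (the vop_old_lookup_args argument of nwfs_lookup suggests the .vop_old_lookup slot).

/*
 * Hedged sketch (not from the examples above): a vnode-op table listing
 * only the handlers shown in Examples #5-#7.  vop_defaultop and the exact
 * field names are assumptions.
 */
struct vop_ops nwfs_vnode_vops = {
	.vop_default =		vop_defaultop,	/* assumed fallback */
	.vop_old_lookup =	nwfs_lookup,	/* Example #5 */
	.vop_putpages =		nwfs_putpages,	/* Example #6 */
	.vop_getpages =		nwfs_getpages	/* Example #7 */
};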
Example #8
/*
 * Do an I/O operation to/from a cache block.
 */
int
nwfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct uio *uiop;
	struct nwnode *np;
	struct nwmount *nmp;
	int error = 0;
	struct uio uio;
	struct iovec io;

	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	if (bp->b_cmd == BUF_CMD_READ) {
	    io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	      case VREG:
		uiop->uio_offset = bio->bio_offset;
		error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
		if (error)
			break;
		if (uiop->uio_resid) {
			size_t left = uiop->uio_resid;
			size_t nread = bp->b_bcount - left;
			if (left > 0)
				bzero((char *)bp->b_data + nread, left);
		}
		break;
/*	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bio->bio_offset;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		if (error == 0 && uiop->uio_resid == (size_t)bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
*/
	    default:
		kprintf("nwfs_doio:  type %x unexpected\n",vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else { /* write */
	    KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
	    if (bio->bio_offset + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - bio->bio_offset;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid =
			(size_t)(bp->b_dirtyend - bp->b_dirtyoff);
		uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set B_ERROR and report the interruption
		 * by setting B_EINTR. For the async case, B_EINTR
		 * is not relevant, so the rpc attempt is essentially
		 * a noop.  For the case of a V3 write rpc not being
		 * committed to stable storage, the block is still
		 * dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 */
    		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {

			crit_enter();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0)
			    bdirty(bp);
			bp->b_flags |= B_EINTR;
			crit_exit();
	    	} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error /*= np->n_error */= error;
/*				np->n_flag |= NWRITEERR;*/
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bio);
		return (0);
	    }
	}
	bp->b_resid = (int)uiop->uio_resid;
	biodone(bio);
	return (error);
}
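
Examples #6, #7, and #8 each fill a struct uio / struct iovec pair by hand before calling ncp_read() or ncp_write(). The helper below is hypothetical (it does not exist in nwfs); it merely factors out the setup pattern already visible above, using the same uio fields.

/*
 * Hypothetical helper, not part of nwfs: initialize a uio/iovec pair for a
 * single kernel-space buffer, mirroring the inline setup in Examples #6-#8.
 */
static void
nwfs_uio_kerninit(struct uio *uio, struct iovec *iov, void *base, size_t len,
		  off_t offset, enum uio_rw rw, struct thread *td)
{
	iov->iov_base = base;
	iov->iov_len = len;
	uio->uio_iov = iov;
	uio->uio_iovcnt = 1;
	uio->uio_offset = offset;
	uio->uio_resid = len;
	uio->uio_segflg = UIO_SYSSPACE;	/* buffer lives in kernel space */
	uio->uio_rw = rw;		/* UIO_READ or UIO_WRITE */
	uio->uio_td = td;
}

With such a helper, the paged write in Example #6 would reduce to nwfs_uio_kerninit(&uio, &iov, (void *)kva, count, IDX_TO_OFF(pages[0]->pindex), UIO_WRITE, td) followed by the ncp_write() call.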