int ncp_initsearch(struct vnode *dvp, struct thread *td, struct ucred *cred) { struct nwmount *nmp = VTONWFS(dvp); struct ncp_conn *conn = NWFSTOCONN(nmp); struct nwnode *np = VTONW(dvp); struct ncp_rq *rqp; u_int8_t volnum = nmp->n_volume; u_int32_t dirent = np->n_fid.f_id; int error; NCPNDEBUG("vol=%d,dir=%d\n", volnum, dirent); error = ncp_rq_alloc(87, conn, td, cred, &rqp); if (error) return error; mb_put_uint8(&rqp->rq, 2); /* subfunction */ mb_put_uint8(&rqp->rq, nmp->name_space); mb_put_uint8(&rqp->rq, 0); /* reserved */ ncp_rq_dbase_path(rqp, volnum, dirent, 0, NULL, NULL); rqp->nr_minrplen = sizeof(np->n_seq); error = ncp_request(rqp); if (error) return error; md_get_mem(&rqp->rp, (caddr_t)&np->n_seq, sizeof(np->n_seq), MB_MSYSTEM); ncp_rq_done(rqp); return 0; }
int nwfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred) { struct nwmount *nmp = VFSTONWFS(vp->v_mount); struct nwnode *np = VTONW(vp); struct vattr vattr; int error; if (vp->v_type != VREG && vp->v_type != VDIR) { kprintf("%s: vn types other than VREG or VDIR are unsupported !\n",__func__); return EIO; } if (uiop->uio_resid == 0) return 0; if (uiop->uio_offset < 0) return EINVAL; if (vp->v_type == VDIR) { error = nwfs_readvdir(vp, uiop, cred); return error; } if (np->n_flag & NMODIFIED) { nwfs_attr_cacheremove(vp); error = VOP_GETATTR(vp, &vattr); if (error) return (error); np->n_mtime = vattr.va_mtime.tv_sec; } else { error = VOP_GETATTR(vp, &vattr); if (error) return (error); if (np->n_mtime != vattr.va_mtime.tv_sec) { error = nwfs_vinvalbuf(vp, V_SAVE, 1); if (error) return (error); np->n_mtime = vattr.va_mtime.tv_sec; } } error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop,cred); return (error); }
/*
 * Returns information for a (one-component) name relative to the specified
 * directory (NCP 87, subfunction 6).
 */
int
ncp_obtain_info(struct nwmount *nmp, u_int32_t dirent, int namelen,
    char *path, struct nw_entry_info *target,
    struct thread *td, struct ucred *cred)
{
	struct ncp_conn *conn = NWFSTOCONN(nmp);
	struct ncp_rq *rqp;
	u_char vol = nmp->n_volume;
	u_char ns;
	int error;

	if (target == NULL) {
		NCPFATAL("target == NULL\n");
		return EINVAL;
	}
	/* An empty path refers to the entry itself: query in the DOS NS. */
	ns = (path == NULL || path[0] == 0) ? NW_NS_DOS : nmp->name_space;
	error = ncp_rq_alloc(87, conn, td, cred, &rqp);
	if (error)
		return error;
	mb_put_uint8(&rqp->rq, 6);		/* subfunction */
	mb_put_uint8(&rqp->rq, ns);
	mb_put_uint8(&rqp->rq, ns);		/* DestNameSpace */
	mb_put_uint16le(&rqp->rq, 0xff);	/* get all */
	mb_put_uint32le(&rqp->rq, IM_ALL);
	ncp_rq_dbase_path(rqp, vol, dirent, namelen, path, &nmp->m.nls);
	error = ncp_request(rqp);
	if (error)
		return error;
	error = ncp_extract_file_info(nmp, rqp, target, path != NULL);
	ncp_rq_done(rqp);
	return error;
}
/*
 * nwfs_close(struct vnode *a_vp, int a_fflag)
 *
 * Close a vnode.  For regular files the per-node open count is
 * decremented, and the server-side NetWare file handle is closed on the
 * last close.  Directories carry no server-side open state here.
 */
static int
nwfs_close(struct vop_close_args *ap)
{
	thread_t td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct nwnode *np = VTONW(vp);
	int error;

	NCPVNDEBUG("name=%s,td=%p,c=%d\n", np->n_name, ap->a_td, np->opened);
	vn_lock(vp, LK_UPGRADE | LK_RETRY);
	error = 0;
	if (vp->v_type == VDIR)
		goto done;
	if (np->opened == 0)
		goto done;
	/* Push dirty buffers to the server before the handle goes away. */
	error = nwfs_vinvalbuf(vp, V_SAVE, 1);
	/*
	 * nwfs_vinvalbuf() may have blocked; re-check the open count in
	 * case the last close was already performed meanwhile.
	 */
	if (np->opened == 0) {
		error = 0;	/* huh? */
		goto done;
	}
	if (--np->opened == 0) {
		error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh,
		    td, proc0.p_ucred);
	}
	np->n_atime = 0;	/* invalidate the cached access time */
done:
	vop_stdclose(ap);
	return (error);
}
/*
 * Unmount the filesystem described by mp.
 *
 * Flushes all vnodes, releases our handle on the NCP connection and,
 * if the connection can be locked exclusively, disconnects it too.
 */
static int
nwfs_unmount(struct mount *mp, int mntflags)
{
	struct nwmount *nmp = VFSTONWFS(mp);
	struct ncp_conn *conn;
	int error, flags;

	NCPVODEBUG("nwfs_unmount: flags=%04x\n", mntflags);
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* There is 1 extra root vnode reference from nwfs_mount(). */
	error = vflush(mp, 1, flags);
	if (error)
		return (error);
	conn = NWFSTOCONN(nmp);
	ncp_conn_puthandle(nmp->connh, NULL, 0);
	/*
	 * Unlock only when ncp_disconnect() fails; presumably a
	 * successful disconnect destroys the connection, making an
	 * unlock invalid — TODO confirm against ncp_disconnect().
	 */
	if (ncp_conn_lock(conn, curthread, proc0.p_ucred,
	    NCPM_WRITE | NCPM_EXECUTE) == 0) {
		if (ncp_disconnect(conn))
			ncp_conn_unlock(conn, curthread);
	}
	mp->mnt_data = (qaddr_t)0;
	if (nmp->m.flags & NWFS_MOUNT_HAVE_NLS)
		kfree(nmp->m.nls.to_lower, M_NWFSDATA);
	kfree(nmp, M_NWFSDATA);
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}
/*
 * Modify the DOS information of the file or directory backing vp
 * (NCP 87, subfunction 7).  info_mask selects which fields of *info
 * the server should apply.
 */
int
ncp_modify_file_or_subdir_dos_info(struct nwmount *nmp, struct vnode *vp,
    u_int32_t info_mask, struct nw_modify_dos_info *info,
    struct thread *td, struct ucred *cred)
{
	struct nwnode *np = VTONW(vp);
	struct ncp_conn *conn = NWFSTOCONN(nmp);
	struct ncp_rq *rqp;
	u_int8_t vol = nmp->n_volume;
	u_int32_t dirid = np->n_fid.f_id;
	int err;

	err = ncp_rq_alloc(87, conn, td, cred, &rqp);
	if (err)
		return err;
	mb_put_uint8(&rqp->rq, 7);		/* subfunction */
	mb_put_uint8(&rqp->rq, nmp->name_space);
	mb_put_uint8(&rqp->rq, 0);		/* reserved */
	mb_put_uint16le(&rqp->rq, SA_ALL);	/* search attribs: all */
	mb_put_uint32le(&rqp->rq, info_mask);
	mb_put_mem(&rqp->rq, (caddr_t)info, sizeof(*info), MB_MSYSTEM);
	ncp_rq_dbase_path(rqp, vol, dirid, 0, NULL, NULL);
	err = ncp_request(rqp);
	if (!err)
		ncp_rq_done(rqp);
	return err;
}
int ncp_search_for_file_or_subdir(struct nwmount *nmp, struct nw_search_seq *seq, struct nw_entry_info *target, struct thread *td,struct ucred *cred) { struct ncp_conn *conn = NWFSTOCONN(nmp); struct ncp_rq *rqp; int error; error = ncp_rq_alloc(87, conn, td, cred, &rqp); if (error) return error; mb_put_uint8(&rqp->rq, 3); /* subfunction */ mb_put_uint8(&rqp->rq, nmp->name_space); mb_put_uint8(&rqp->rq, 0); /* data stream */ mb_put_uint16le(&rqp->rq, 0xffff); /* Search attribs */ mb_put_uint32le(&rqp->rq, IM_ALL); /* return info mask */ mb_put_mem(&rqp->rq, (caddr_t)seq, 9, MB_MSYSTEM); mb_put_uint8(&rqp->rq, 2); /* 2 byte pattern */ mb_put_uint8(&rqp->rq, 0xff); /* following is a wildcard */ mb_put_uint8(&rqp->rq, '*'); rqp->nr_minrplen = sizeof(*seq) + 1 + NCP_INFOSZ + 1; error = ncp_request(rqp); if (error) return error; md_get_mem(&rqp->rp, (caddr_t)seq, sizeof(*seq), MB_MSYSTEM); md_get_uint8(&rqp->rp, NULL); /* skip */ error = ncp_extract_file_info(nmp, rqp, target, 1); ncp_rq_done(rqp); return error; }
/*
 * Write to a regular file vnode.  Handles IO_APPEND/IO_SYNC cache
 * flushing, enforces the process file-size resource limit, and extends
 * the locally cached file size after a successful write.
 */
int
nwfs_writevnode(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	struct nwmount *nmp = VTONWFS(vp);
	struct nwnode *np = VTONW(vp);
	struct thread *td;
/*	struct vattr vattr;*/
	int error = 0;

	if (vp->v_type != VREG) {
		kprintf("%s: vn types other than VREG unsupported !\n",
		    __func__);
		return EIO;
	}
	NCPVNDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset,
	    uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			/* Flush locally dirty data before append/sync I/O. */
			nwfs_attr_cacheremove(vp);
			error = nwfs_vinvalbuf(vp, V_SAVE, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			/* We can relay only on local information about file size,
			 * because until file is closed NetWare will not return
			 * the correct size. */
#if 0 /* notyet */
			nwfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	/* Enforce RLIMIT_FSIZE: signal the process and refuse the write. */
	if (td->td_proc && uiop->uio_offset + uiop->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		lwpsignal(td->td_proc, td->td_lwp, SIGXFSZ);
		return (EFBIG);
	}
	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cred);
	NCPVNDEBUG("after: ofs=%d,resid=%d\n", (int)uiop->uio_offset,
	    uiop->uio_resid);
	if (!error) {
		/* Grow the cached size if the write extended the file. */
		if (uiop->uio_offset > np->n_size) {
			np->n_vattr.va_size = np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return (error);
}
/* * nwfs_statfs call */ int nwfs_statfs(struct mount *mp, struct statfs *sbp, struct ucred *cred) { struct nwmount *nmp = VFSTONWFS(mp); int error = 0, secsize; struct nwnode *np = nmp->n_root; struct ncp_volume_info vi; if (np == NULL) return EINVAL; error = ncp_get_volume_info_with_number(NWFSTOCONN(nmp), nmp->n_volume, &vi, curthread, cred); if (error) return error; secsize = 512; /* XXX how to get real value ??? */ sbp->f_spare2=0; /* placeholder */ /* fundamental file system block size */ sbp->f_bsize = vi.sectors_per_block*secsize; /* optimal transfer block size */ sbp->f_iosize = NWFSTOCONN(nmp)->buffer_size; /* total data blocks in file system */ sbp->f_blocks= vi.total_blocks; /* free blocks in fs */ sbp->f_bfree = vi.free_blocks + vi.purgeable_blocks; /* free blocks avail to non-superuser */ sbp->f_bavail= vi.free_blocks+vi.purgeable_blocks; /* total file nodes in file system */ sbp->f_files = vi.total_dir_entries; /* free file nodes in fs */ sbp->f_ffree = vi.available_dir_entries; sbp->f_flags = 0; /* copy of mount exported flags */ if (sbp != &mp->mnt_stat) { sbp->f_fsid = mp->mnt_stat.f_fsid; /* file system id */ sbp->f_owner = mp->mnt_stat.f_owner; /* user that mounted the filesystem */ sbp->f_type = mp->mnt_vfc->vfc_typenum; /* type of filesystem */ bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); } strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN); return 0; }
/*
 * If both dir and name are NULL, then in target there's already a looked-up
 * entry that wants to be opened.
 *
 * Open or create a file/subdirectory (NCP 87, subfunction 1).  On
 * success the open info (handle, action taken, attributes) is returned
 * in *nop.
 */
int
ncp_open_create_file_or_subdir(struct nwmount *nmp, struct vnode *dvp,
    int namelen, char *name, int open_create_mode,
    u_int32_t create_attributes, int desired_acc_rights,
    struct ncp_open_info *nop, struct thread *td, struct ucred *cred)
{
	struct ncp_conn *conn = NWFSTOCONN(nmp);
	struct ncp_rq *rqp;
	u_int16_t sattr = SA_ALL & (~SA_SUBDIR_FILES);
	u_int8_t vol;
	u_int32_t dirid;
	int error;

	error = ncp_rq_alloc(87, conn, td, cred, &rqp);
	if (error)
		return error;
	vol = nmp->n_volume;
	dirid = VTONW(dvp)->n_fid.f_id;
	/* When creating a directory, match subdirectory entries too. */
	if ((create_attributes & aDIR) != 0)
		sattr |= SA_SUBDIR_FILES;
	mb_put_uint8(&rqp->rq, 1);		/* subfunction */
	mb_put_uint8(&rqp->rq, nmp->name_space);
	mb_put_uint8(&rqp->rq, open_create_mode);
	mb_put_uint16le(&rqp->rq, sattr);
	mb_put_uint32le(&rqp->rq, IM_ALL);
	mb_put_uint32le(&rqp->rq, create_attributes);
	/*
	 * The desired acc rights seem to be the inherited rights mask for
	 * directories
	 */
	mb_put_uint16le(&rqp->rq, desired_acc_rights);
	ncp_rq_dbase_path(rqp, vol, dirid, namelen, name, &nmp->m.nls);
	error = ncp_request(rqp);
	if (error) {
		if (error == NWE_FILE_NO_CREATE_PRIV)
			error = EACCES;
		return error;
	}
	/* Reply: 32-bit handle, action byte, pad byte, entry info. */
	md_get_uint32le(&rqp->rp, &nop->origfh);
	md_get_uint8(&rqp->rp, &nop->action);
	md_get_uint8(&rqp->rp, NULL);		/* skip */
	error = ncp_extract_file_info(nmp, rqp, &nop->fattr, 1);
	ncp_rq_done(rqp);
	ConvertToNWfromDWORD(nop->origfh, &nop->fh);
	return error;
}
/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nwfs_vinvalbuf(struct vnode *vp, int flags, int intrflg)
{
	struct nwnode *np = VTONW(vp);
/*	struct nwmount *nmp = VTONWFS(vp);*/
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VRECLAIMED) {
		return (0);
	}
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/* Wait for any flush already in progress on this node. */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, 0, "nwfsvinv", slptimeo);
		/*
		 * The tsleep() result is deliberately replaced: interruption
		 * is judged from the connection state instead.
		 */
		error = ncp_chkintr(NWFSTOCONN(VTONWFS(vp)), curthread);
		if (error == EINTR && intrflg)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, slpflag, 0);
	/* Retry vinvalbuf() until it succeeds or we are interrupted. */
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			/* Give up: clear in-progress flag and wake waiters. */
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, slpflag, 0);
	}
	/* Flush complete: node is clean; wake anyone waiting on us. */
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (error);
}
/*
 * nwfs_create call
 * Create a regular file. On entry the directory to contain the file being
 * created is locked. We must release before we return.
 *
 * nwfs_create(struct vnode *a_dvp, struct vnode **a_vpp,
 *	struct componentname *a_cnpl, struct vattr *a_vap)
 */
static int
nwfs_create(struct vop_old_create_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct vnode *vp = NULL;
	int error = 0, fmode;
	struct vattr vattr;
	struct nwnode *np;
	struct ncp_open_info no;
	struct nwmount *nmp = VTONWFS(dvp);
	ncpfid fid;

	NCPVNDEBUG("\n");
	*vpp = NULL;
	if (vap->va_type == VSOCK)
		return (EOPNOTSUPP);
	if ((error = VOP_GETATTR(dvp, &vattr))) {
		return (error);
	}
	fmode = AR_READ | AR_WRITE;
/*	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= AR_DENY_READ | AR_DENY_WRITE;*/
	/* Create the file on the server (replacing any existing entry). */
	error = ncp_open_create_file_or_subdir(nmp, dvp, cnp->cn_namelen,
	    cnp->cn_nameptr,
	    OC_MODE_CREATE | OC_MODE_OPEN | OC_MODE_REPLACE,
	    0, fmode, &no, cnp->cn_td, cnp->cn_cred);
	if (!error) {
		/*
		 * Close the handle again and instantiate a vnode for the
		 * new entry.  NOTE(review): a ncp_close_file() error is
		 * silently overwritten by the nwfs_nget() result below.
		 */
		error = ncp_close_file(NWFSTOCONN(nmp), &no.fh,
		    cnp->cn_td, cnp->cn_cred);
		fid.f_parent = VTONW(dvp)->n_fid.f_id;
		fid.f_id = no.fattr.dirEntNum;
		error = nwfs_nget(VTOVFS(dvp), fid, &no.fattr, dvp, &vp);
		if (!error) {
			np = VTONW(vp);
			np->opened = 0;
			*vpp = vp;
		}
	}
	return (error);
}
/*
 * Delete the name-space entry "name" under directory "dirent" on the
 * server (NCP 87, subfunction 8).
 */
int
ncp_DeleteNSEntry(struct nwmount *nmp, u_int32_t dirent,
    int namelen, char *name, struct thread *td, struct ucred *cred)
{
	struct ncp_conn *conn = NWFSTOCONN(nmp);
	struct ncp_rq *rqp;
	int err;

	err = ncp_rq_alloc(87, conn, td, cred, &rqp);
	if (err)
		return err;
	mb_put_uint8(&rqp->rq, 8);		/* subfunction */
	mb_put_uint8(&rqp->rq, nmp->name_space);
	mb_put_uint8(&rqp->rq, 0);		/* reserved */
	mb_put_uint16le(&rqp->rq, SA_ALL);	/* search attribs: all */
	ncp_rq_dbase_path(rqp, nmp->n_volume, dirent, namelen, name,
	    &nmp->m.nls);
	err = ncp_request(rqp);
	if (!err)
		ncp_rq_done(rqp);
	return err;
}
/* * nwfs_ioctl(struct vnode *a_vp, u_long a_command, caddr_t a_data, * int a_fflag, struct ucred *a_cred) */ int nwfs_ioctl(struct vop_ioctl_args *ap) { int error; struct thread *td = curthread; /* XXX */ struct ucred *cred = ap->a_cred; struct vnode *vp = ap->a_vp; struct nwnode *np = VTONW(vp); struct nwmount *nmp = VTONWFS(vp); struct ncp_conn *conn = NWFSTOCONN(nmp); struct ncp_handle *hp; struct nw_entry_info *fap; void *data = ap->a_data; switch (ap->a_command) { case NWFSIOC_GETCONN: error = ncp_conn_lock(conn, td, cred, NCPM_READ); if (error) break; error = ncp_conn_gethandle(conn, td, &hp); ncp_conn_unlock(conn, td); if (error) break; *(int*)data = hp->nh_id; break; case NWFSIOC_GETEINFO: if ((error = VOP_EACCESS(vp, VEXEC, cred))) break; fap = data; error = ncp_obtain_info(nmp, np->n_fid.f_id, 0, NULL, fap, td, ap->a_cred); strcpy(fap->entryName, np->n_name); fap->nameLen = np->n_nmlen; break; case NWFSIOC_GETNS: if ((error = VOP_EACCESS(vp, VEXEC, cred))) break; *(int*)data = nmp->name_space; break; default: error = EINVAL; } return (error); }
/* * nwfs_remove call. It isn't possible to emulate UFS behaivour because * NetWare doesn't allow delete/rename operations on an opened file. * * nwfs_remove(struct vnode *a_dvp, * struct vnode *a_vp, struct componentname *a_cnp) */ static int nwfs_remove(struct vop_old_remove_args *ap) { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; struct componentname *cnp = ap->a_cnp; struct nwnode *np = VTONW(vp); struct nwmount *nmp = VTONWFS(vp); int error; if (vp->v_type == VDIR || np->opened || VREFCNT(vp) > 1) { error = EPERM; } else if (!ncp_conn_valid(NWFSTOCONN(nmp))) { error = EIO; } else { error = ncp_DeleteNSEntry(nmp, VTONW(dvp)->n_fid.f_id, cnp->cn_namelen,cnp->cn_nameptr,cnp->cn_td,cnp->cn_cred); if (error == 0) np->n_flag |= NSHOULDFREE; else if (error == 0x899c) error = EACCES; } return (error); }
/*
 * Do an I/O operation to/from a cache block.
 *
 * Translates a buf-cache bio into an ncp_read()/ncp_write() against the
 * server, propagating errors into the buf flags and completing the bio.
 */
int
nwfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr,
    struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct uio *uiop;
	struct nwnode *np;
	struct nwmount *nmp;
	int error = 0;
	struct uio uio;
	struct iovec io;

	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	/* Build a single-segment system-space uio over the buffer. */
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;
	if (bp->b_cmd == BUF_CMD_READ) {
		io.iov_len = uiop->uio_resid = (size_t)bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = bio->bio_offset;
			error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
			if (error)
				break;
			if (uiop->uio_resid) {
				/* Short read: zero-fill the unread tail. */
				size_t left = uiop->uio_resid;
				size_t nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
/*
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = bio->bio_offset;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			if (error == 0 && uiop->uio_resid == (size_t)bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
*/
		default:
			kprintf("nwfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else { /* write */
		KKASSERT(bp->b_cmd == BUF_CMD_WRITE);
		/* Clip the dirty range to the (cached) end of file. */
		if (bio->bio_offset + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - bio->bio_offset;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid =
			    (size_t)(bp->b_dirtyend - bp->b_dirtyoff);
			uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set B_ERROR and report the interruption
			 * by setting B_EINTR. For the async case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop. For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused. This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 */
			if (error == EINTR ||
			    (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				crit_enter();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0)
					bdirty(bp);
				bp->b_flags |= B_EINTR;
				crit_exit();
			} else {
				if (error) {
					bp->b_flags |= B_ERROR;
					bp->b_error /*= np->n_error */= error;
/*					np->n_flag |= NWRITEERR;*/
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			/* Nothing dirty to write: complete immediately. */
			bp->b_resid = 0;
			biodone(bio);
			return (0);
		}
	}
	bp->b_resid = (int)uiop->uio_resid;
	biodone(bio);
	return (error);
}
/*
 * Return locked vnode to root of a filesystem.
 *
 * On first call this also resolves the mounted volume, negotiates the
 * name space, and looks up the configured root path on the server.
 */
static int
nwfs_root(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	struct nwmount *nmp;
	struct nwnode *np;
	struct ncp_conn *conn;
	struct nw_entry_info fattr;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	int error, nsf, opt;
	u_char vol;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	nmp = VFSTONWFS(mp);
	conn = NWFSTOCONN(nmp);
	/* Fast path: root already instantiated, just lock and return it. */
	if (nmp->n_root) {
		*vpp = NWTOV(nmp->n_root);
		while (vget(*vpp, LK_EXCLUSIVE) != 0) /* XXX */
			;
		return 0;
	}
	error = ncp_lookup_volume(conn, nmp->m.mounted_vol, &vol,
	    &nmp->n_rootent.f_id, td, cred);
	if (error)
		return ENOENT;
	nmp->n_volume = vol;
	error = ncp_get_namespaces(conn, vol, &nsf, td, cred);
	if (error)
		return ENOENT;
	/* Prefer the OS/2 name space when available and not disabled. */
	if (nsf & NW_NSB_OS2) {
		NCPVODEBUG("volume %s has os2 namespace\n",
		    nmp->m.mounted_vol);
		if ((nmp->m.flags & NWFS_MOUNT_NO_OS2) == 0) {
			nmp->name_space = NW_NS_OS2;
			nmp->m.nls.opt &= ~NWHP_DOS;
		}
	}
	/* Normalize the case-conversion options for the chosen NS. */
	opt = nmp->m.nls.opt;
	nsf = opt & (NWHP_UPPER | NWHP_LOWER);
	if (opt & NWHP_DOS) {
		if (nsf == (NWHP_UPPER | NWHP_LOWER)) {
			nmp->m.nls.opt &= ~(NWHP_LOWER | NWHP_UPPER);
		} else if (nsf == 0) {
			nmp->m.nls.opt |= NWHP_LOWER;
		}
	} else {
		if (nsf == (NWHP_UPPER | NWHP_LOWER)) {
			nmp->m.nls.opt &= ~(NWHP_LOWER | NWHP_UPPER);
		}
	}
	if (nmp->m.root_path[0]) {
		/*
		 * NOTE(review): root_path appears to be a counted string
		 * whose first byte is the component count; the parent is
		 * looked up with one component fewer, then the full path.
		 * A negative namelen presumably signals this pre-counted
		 * form to ncp_obtain_info() — TODO confirm.
		 */
		nmp->m.root_path[0]--;
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    -nmp->m.root_path[0], nmp->m.root_path,
		    &fattr, td, cred);
		if (error) {
			NCPFATAL("Invalid root path specified\n");
			return ENOENT;
		}
		nmp->n_rootent.f_parent = fattr.dirEntNum;
		nmp->m.root_path[0]++;
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    -nmp->m.root_path[0], nmp->m.root_path,
		    &fattr, td, cred);
		if (error) {
			NCPFATAL("Invalid root path specified\n");
			return ENOENT;
		}
		nmp->n_rootent.f_id = fattr.dirEntNum;
	} else {
		/* No root path: the volume root itself becomes the root. */
		error = ncp_obtain_info(nmp, nmp->n_rootent.f_id,
		    0, NULL, &fattr, td, cred);
		if (error) {
			NCPFATAL("Can't obtain volume info\n");
			return ENOENT;
		}
		fattr.nameLen = strlen(strcpy(fattr.entryName, NWFS_ROOTVOL));
		nmp->n_rootent.f_parent = nmp->n_rootent.f_id;
	}
	error = nwfs_nget(mp, nmp->n_rootent, &fattr, NULL, &vp);
	if (error)
		return (error);
	vsetflags(vp, VROOT);
	np = VTONW(vp);
	if (nmp->m.root_path[0] == 0)
		np->n_flag |= NVOLUME;
	nmp->n_root = np;
/*	error = VOP_GETATTR(vp, &vattr);
	if (error) {
		vput(vp);
		NCPFATAL("Can't get root directory entry\n");
		return error;
	}*/
	*vpp = vp;
	return (0);
}
/*
 * Vnode op for VM putpages.
 * possible bug: all IO done in sync mode
 * Note that vop_close always invalidate pages before close, so it's
 * not necessary to open vnode.
 *
 * nwfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_sync, int *a_rtvals, vm_ooffset_t a_offset)
 */
int
nwfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = ap->a_vp;
	struct ucred *cred;

#ifndef NWFS_RWCACHE
	/* No write cache: delegate to the generic pager path. */
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;	/* XXX */
	VOP_OPEN(vp, FWRITE, cred, NULL);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred);
	return error;
#else
	/* Write-cache path: push the pages directly via ncp_write(). */
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int *rtvals;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;	/* XXX */
/*	VOP_OPEN(vp, FWRITE, cred, NULL);*/
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	/* Assume failure until the write succeeds. */
	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/* Map the pages into a pbuf's KVA for a single contiguous write. */
	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	NCPVNDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
/*	VOP_CLOSE(vp, FWRITE, cred);*/
	NCPVNDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nwfs_pbuf_freecnt);

	if (!error) {
		/* Mark fully written pages clean and OK for the pager. */
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
	}
	return rtvals[0];
#endif /* NWFS_RWCACHE */
}
/* * nwfs_file rename call * * nwfs_rename(struct vnode *a_fdvp, struct vnode *a_fvp, * struct componentname *a_fcnp, struct vnode *a_tdvp, * struct vnode *a_tvp, struct componentname *a_tcnp) */ static int nwfs_rename(struct vop_old_rename_args *ap) { struct vnode *fvp = ap->a_fvp; struct vnode *tvp = ap->a_tvp; struct vnode *fdvp = ap->a_fdvp; struct vnode *tdvp = ap->a_tdvp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; struct nwmount *nmp=VTONWFS(fvp); u_int16_t oldtype = 6; int error=0; /* Check for cross-device rename */ if ((fvp->v_mount != tdvp->v_mount) || (tvp && (fvp->v_mount != tvp->v_mount))) { error = EXDEV; goto out; } if (tvp && VREFCNT(tvp) > 1) { error = EBUSY; goto out; } if (tvp && tvp != fvp) { error = ncp_DeleteNSEntry(nmp, VTONW(tdvp)->n_fid.f_id, tcnp->cn_namelen, tcnp->cn_nameptr, tcnp->cn_td, tcnp->cn_cred); if (error == 0x899c) error = EACCES; if (error) goto out; } if (fvp->v_type == VDIR) { oldtype |= NW_TYPE_SUBDIR; } else if (fvp->v_type == VREG) { oldtype |= NW_TYPE_FILE; } else return EINVAL; error = ncp_nsrename(NWFSTOCONN(nmp), nmp->n_volume, nmp->name_space, oldtype, &nmp->m.nls, VTONW(fdvp)->n_fid.f_id, fcnp->cn_nameptr, fcnp->cn_namelen, VTONW(tdvp)->n_fid.f_id, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_td,tcnp->cn_cred); if (error == 0x8992) error = EEXIST; out: if (tdvp == tvp) vrele(tdvp); else vput(tdvp); if (tvp) vput(tvp); vrele(fdvp); vrele(fvp); nwfs_attr_cacheremove(fdvp); nwfs_attr_cacheremove(tdvp); /* * Need to get rid of old vnodes, because netware will change * file id on rename */ vgone_vxlocked(fvp); if (tvp) vgone_vxlocked(tvp); /* * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. */ if (error == ENOENT) error = 0; return (error); }
/*
 * Vnode op for VM getpages.
 * Wish wish .... get rid from multiple IO routines
 *
 * nwfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		 int a_reqpage, vm_ooffset_t a_offset)
 */
int
nwfs_getpages(struct vop_getpages_args *ap)
{
#ifndef NWFS_RWCACHE
	/* No read cache: delegate to the generic pager path. */
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_reqpage, ap->a_seqaccess);
#else
	/* Read-cache path: fill the pages directly via ncp_read(). */
	int i, error, npages;
	size_t nextoff, toff;
	size_t count;
	size_t size;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct nwmount *nmp;
	struct nwnode *np;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;

	vp = ap->a_vp;
	np = VTONW(vp);
	nmp = VFSTONWFS(vp->v_mount);
	pages = ap->a_m;
	count = (size_t)ap->a_count;

	if (vp->v_object == NULL) {
		kprintf("nwfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	/* Map the pages into a pbuf's KVA for one contiguous read. */
	bp = getpbuf_kva(&nwfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncp_read(NWFSTOCONN(nmp), &np->n_fh, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nwfs_pbuf_freecnt);

	/* Total failure (nothing read): free all but the requested page. */
	if (error && (uio.uio_resid == count)) {
		kprintf("nwfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		/*
		 * NOTE: pmap dirty bit should have already been cleared.
		 * We do not clear it here.
		 */
		if (nextoff <= size) {
			/* Page entirely covered by the read: fully valid. */
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			/* Partial page: mark only the read portion valid. */
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
			    ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object). Result:
			 * It appears that emperical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_REFERENCED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* NWFS_RWCACHE */
}