STATIC int linvfs_readlink( struct dentry *dentry, char *buf, int size) { vnode_t *vp = LINVFS_GET_VP(dentry->d_inode); uio_t uio; iovec_t iov; int error; iov.iov_base = buf; iov.iov_len = size; uio.uio_iov = &iov; uio.uio_offset = 0; uio.uio_segflg = UIO_USERSPACE; uio.uio_resid = size; uio.uio_iovcnt = 1; VOP_READLINK(vp, &uio, 0, NULL, error); if (error) return -error; return (size - uio.uio_resid); }
/*
 * vnode_iop_follow_link - inode operation to resolve a symlink.
 *
 * NOTE(review): the matching "#if"/"#else" for the conditional return
 * type is above this chunk.  On kernels >= 2.6.13 this returns the
 * link buffer (or an ERR_PTR) and vnop_iop_put_link() frees it; on
 * older kernels it returns an int error code and frees locally.
 */
int
#endif
vnode_iop_follow_link(
    DENT_T *dentry,              /* link */
    struct nameidata *nd         /* link resolution */
)
{
    INODE_T *ip;
    struct uio uio;
    iovec_t iov;
    int err = 0;
    /* Target is read into a kernel buffer; PATH_MAX bytes allocated. */
    char *buf = KMEM_ALLOC(PATH_MAX, KM_SLEEP);
    CALL_DATA_T cd;

    if (buf == NULL)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
        return ERR_PTR(-ENOMEM);
#else
        return -ENOMEM;
#endif
    uio.uio_iov = &iov;
    /* Read at most PATH_MAX-1 bytes so there is room for the NUL below. */
    mfs_uioset(&uio, buf, PATH_MAX-1, 0, UIO_SYSSPACE);
    mdki_linux_init_call_data(&cd);
    ip = dentry->d_inode;
    ASSERT_KERNEL_UNLOCKED();
    ASSERT_I_SEM_NOT_MINE(ip);
    err = VOP_READLINK(ITOV(ip), &uio, &cd);
    /* Convert the vnode-layer (positive UNIX) errno to a Linux -errno. */
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);
    if (err == 0) {
        if (uio.uio_resid == 0)
            /* Buffer filled completely: target may be truncated. */
            err = -ENAMETOOLONG;
        else {
            /* readlink doesn't copy a NUL at the end, we must do it */
            buf[uio.uio_offset] = '\0';
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
            /* follow the link */
            err = vfs_follow_link(nd, buf);
#else
            /* Hand ownership of buf to the VFS; do NOT free it here. */
            nd_set_link(nd, buf);
            return(buf); /* vnop_iop_put_link() will free this buf. */
#endif
        }
    }
    /* Error paths (and the pre-2.6.13 success path) free the buffer. */
    KMEM_FREE(buf, PATH_MAX);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,13)
    return ERR_PTR(err);
#else
    return(err);
#endif
}
/*
 * Read the contents of a symbolic link.
 *
 * System call: copies the target of symlink 'name' into the user
 * buffer 'buf' (at most 'count' bytes, no NUL terminator) and returns
 * the byte count.  On failure, returns -1 via set_errno().
 * ESTALE from the underlying filesystem is retried a bounded number
 * of times (fs_need_estale_retry) by restarting the lookup.
 */
ssize_t
readlink(char *name, char *buf, size_t count)
{
	vnode_t *vp;
	struct iovec aiov;
	struct uio auio;
	int error;
	struct vattr vattr;
	ssize_t cnt;
	int estale_retry = 0;

	/* Reject counts that overflow ssize_t. */
	if ((cnt = (ssize_t)count) < 0)
		return (set_errno(EINVAL));
lookup:
	/* NO_FOLLOW: we want the link itself, not its target. */
	if (error = lookupname(name, UIO_USERSPACE, NO_FOLLOW, NULLVPP, &vp)) {
		if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
			goto lookup;
		return (set_errno(error));
	}
	if (vp->v_type != VLNK) {
		/*
		 * Ask the underlying filesystem if it wants this
		 * object to look like a symlink at user-level.
		 */
		vattr.va_mask = AT_TYPE;
		error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL);
		if (error || vattr.va_type != VLNK) {
			VN_RELE(vp);
			if ((error == ESTALE) &&
			    fs_need_estale_retry(estale_retry++))
				goto lookup;
			/* Not a symlink by any definition. */
			return (set_errno(EINVAL));
		}
	}
	/* Describe the user buffer and read the link contents into it. */
	aiov.iov_base = buf;
	aiov.iov_len = cnt;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_loffset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_extflg = UIO_COPY_CACHED;
	auio.uio_resid = cnt;
	error = VOP_READLINK(vp, &auio, CRED(), NULL);
	VN_RELE(vp);
	if (error) {
		if ((error == ESTALE) && fs_need_estale_retry(estale_retry++))
			goto lookup;
		return (set_errno(error));
	}
	/* Bytes transferred = requested - residual. */
	return ((ssize_t)(cnt - auio.uio_resid));
}
/*
 * Rump hypercall wrapper for VOP_READLINK: enter the rump kernel
 * context, run the vnode operation, then leave the context again.
 */
int
RUMP_VOP_READLINK(struct vnode *vp, struct uio *uio, struct kauth_cred *cred)
{
	int rv;

	rump_schedule();
	rv = VOP_READLINK(vp, uio, cred);
	rump_unschedule();

	return rv;
}
/*
 * FUSE readlink handler: resolve the inode to a znode, read the
 * symlink target into a local buffer, and reply to the FUSE request.
 * Returns 0 on success (reply already sent); on error the caller is
 * presumably responsible for sending fuse_reply_err — confirm.
 */
static int zfsfuse_readlink(fuse_req_t req, fuse_ino_t ino)
{
	vfs_t *vfs = (vfs_t *) fuse_req_userdata(req);
	zfsvfs_t *zfsvfs = vfs->vfs_data;

	ZFS_ENTER(zfsvfs);

	znode_t *znode;

	int error = zfs_zget(zfsvfs, ino, &znode, B_FALSE);
	if(error) {
		ZFS_EXIT(zfsvfs);
		/* If the inode we are trying to get was recently deleted
		   dnode_hold_impl will return EEXIST instead of ENOENT */
		return error == EEXIST ? ENOENT : error;
	}

	ASSERT(znode != NULL);
	vnode_t *vp = ZTOV(znode);
	ASSERT(vp != NULL);

	/* Room for the longest possible target plus a NUL terminator. */
	char buffer[PATH_MAX + 1];

	iovec_t iovec;
	uio_t uio;
	uio.uio_iov = &iovec;
	uio.uio_iovcnt = 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_fmode = 0;
	uio.uio_llimit = RLIM64_INFINITY;
	iovec.iov_base = buffer;
	/* Read at most sizeof(buffer)-1 so the NUL always fits. */
	iovec.iov_len = sizeof(buffer) - 1;
	uio.uio_resid = iovec.iov_len;
	uio.uio_loffset = 0;

	cred_t cred;
	zfsfuse_getcred(req, &cred);

	error = VOP_READLINK(vp, &uio, &cred, NULL);

	VN_RELE(vp);
	ZFS_EXIT(zfsvfs);

	if(!error) {
		/* After the read, uio_loffset is the number of bytes read. */
		VERIFY(uio.uio_loffset < sizeof(buffer));
		buffer[uio.uio_loffset] = '\0';
		fuse_reply_readlink(req, buffer);
	}

	return error;
}
/* * careful here - this function can get called recursively, so * we need to be very careful about how much stack we use. * uio is kmalloced for this reason... */ STATIC int linvfs_follow_link( struct dentry *dentry, struct nameidata *nd) { vnode_t *vp; uio_t *uio; iovec_t iov; int error; char *link; ASSERT(dentry); ASSERT(nd); link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL); if (!link) return -ENOMEM; uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL); if (!uio) { kfree(link); return -ENOMEM; } vp = LINVFS_GET_VP(dentry->d_inode); iov.iov_base = link; iov.iov_len = MAXNAMELEN; uio->uio_iov = &iov; uio->uio_offset = 0; uio->uio_segflg = UIO_SYSSPACE; uio->uio_resid = MAXNAMELEN; uio->uio_iovcnt = 1; VOP_READLINK(vp, uio, 0, NULL, error); if (error) { kfree(uio); kfree(link); return -error; } link[MAXNAMELEN - uio->uio_resid] = '\0'; kfree(uio); /* vfs_follow_link returns (-) errors */ error = vfs_follow_link(nd, link); kfree(link); return error; }
/*
 * careful here - this function can get called recursively, so
 * we need to be very careful about how much stack we use.
 * uio is kmalloced for this reason...
 *
 * 2.6.13+ style follow_link: the target (or an ERR_PTR on failure)
 * is published via nd_set_link(); the corresponding put_link hook
 * frees the buffer later, so success must NOT free 'link' here.
 */
STATIC void *
linvfs_follow_link(
	struct dentry		*dentry,
	struct nameidata	*nd)
{
	vnode_t	*vp;
	uio_t	*uio;
	iovec_t	iov;
	int	error;
	char	*link;

	ASSERT(dentry);
	ASSERT(nd);

	link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
	if (!link) {
		/* Report the failure through the nameidata, not the return. */
		nd_set_link(nd, ERR_PTR(-ENOMEM));
		return NULL;
	}

	uio = (uio_t *)kmalloc(sizeof(uio_t), GFP_KERNEL);
	if (!uio) {
		kfree(link);
		nd_set_link(nd, ERR_PTR(-ENOMEM));
		return NULL;
	}

	vp = LINVFS_GET_VP(dentry->d_inode);

	iov.iov_base = link;
	iov.iov_len = MAXPATHLEN;

	uio->uio_iov = &iov;
	uio->uio_offset = 0;
	uio->uio_segflg = UIO_SYSSPACE;
	uio->uio_resid = MAXPATHLEN;
	uio->uio_iovcnt = 1;

	/* VOP_READLINK is a macro that assigns its status into error. */
	VOP_READLINK(vp, uio, 0, NULL, error);
	if (error) {
		/* Free the buffer and hand the VFS an ERR_PTR instead. */
		kfree(link);
		link = ERR_PTR(-error);
	} else {
		/* NUL-terminate at the number of bytes actually read. */
		link[MAXPATHLEN - uio->uio_resid] = '\0';
	}
	kfree(uio);

	nd_set_link(nd, link);
	return NULL;
}
/*
 * Read the target of a symlink identified by an XFS file handle
 * (readlink-by-handle ioctl).  Copies the link target into the
 * user buffer described by the handle request and returns the
 * number of bytes copied, or a negative errno.
 */
STATIC int
xfs_readlink_by_handle(
	xfs_mount_t		*mp,
	void			__user *arg,
	struct file		*parfilp,
	struct inode		*parinode)
{
	int			error;
	struct iovec		aiov;
	struct uio		auio;
	struct inode		*inode;
	xfs_fsop_handlereq_t	hreq;
	vnode_t			*vp;
	__u32			olen;

	if (!capable(CAP_SYS_ADMIN))
		return -XFS_ERROR(EPERM);
	if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
		return -XFS_ERROR(EFAULT);

	error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode);
	if (error)
		return -error;

	/* Restrict this handle operation to symlinks only. */
	if (!S_ISLNK(inode->i_mode)) {
		VN_RELE(vp);
		return -XFS_ERROR(EINVAL);
	}

	/* Output buffer length is read from the handle request. */
	if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
		VN_RELE(vp);
		return -XFS_ERROR(EFAULT);
	}

	aiov.iov_len = olen;
	aiov.iov_base = hreq.ohandle;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_USERSPACE;
	auio.uio_resid = olen;

	VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
	VN_RELE(vp);

	/*
	 * BUG FIX: the readlink status was previously ignored, so a
	 * failed VOP_READLINK returned a bogus byte count to userspace.
	 * Propagate it as a negative errno like the other exits above.
	 */
	if (error)
		return -error;

	return (olen - auio.uio_resid);
}
/* * Read the contents of a symlink, allocate a path buffer out of the * namei_oc and initialize the supplied nlcomponent with the result. * * If an error occurs no buffer will be allocated or returned in the nlc. */ int nreadsymlink(struct nlookupdata *nd, struct nchandle *nch, struct nlcomponent *nlc) { struct vnode *vp; struct iovec aiov; struct uio auio; int linklen; int error; char *cp; nlc->nlc_nameptr = NULL; nlc->nlc_namelen = 0; if (nch->ncp->nc_vp == NULL) return(ENOENT); if ((error = cache_vget(nch, nd->nl_cred, LK_SHARED, &vp)) != 0) return(error); cp = objcache_get(namei_oc, M_WAITOK); aiov.iov_base = cp; aiov.iov_len = MAXPATHLEN; auio.uio_iov = &aiov; auio.uio_iovcnt = 1; auio.uio_offset = 0; auio.uio_rw = UIO_READ; auio.uio_segflg = UIO_SYSSPACE; auio.uio_td = nd->nl_td; auio.uio_resid = MAXPATHLEN - 1; error = VOP_READLINK(vp, &auio, nd->nl_cred); if (error) goto fail; linklen = MAXPATHLEN - 1 - auio.uio_resid; if (varsym_enable) { linklen = varsymreplace(cp, linklen, MAXPATHLEN - 1); if (linklen < 0) { error = ENAMETOOLONG; goto fail; } } cp[linklen] = 0; nlc->nlc_nameptr = cp; nlc->nlc_namelen = linklen; vput(vp); return(0); fail: objcache_put(namei_oc, cp); vput(vp); return(error); }
/*
 * Does most of the work for readlink().
 *
 * Note, however, if you're implementing symlinks, that various
 * other parts of the VFS layer are missing crucial elements of
 * support for symlinks.
 */
int
vfs_readlink(char *path, struct uio *uio)
{
	struct vnode *vn;
	int err;

	err = vfs_lookup(path, &vn);
	if (err == 0) {
		/* Read the link target, then drop our reference. */
		err = VOP_READLINK(vn, uio);
		VOP_DECREF(vn);
	}
	return err;
}
static int unionfs_readlink(void *v) { struct vop_readlink_args *ap = v; int error; struct unionfs_node *unp; struct vnode *vp; UNIONFS_INTERNAL_DEBUG("unionfs_readlink: enter\n"); unp = VTOUNIONFS(ap->a_vp); vp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp); error = VOP_READLINK(vp, ap->a_uio, ap->a_cred); UNIONFS_INTERNAL_DEBUG("unionfs_readlink: leave (%d)\n", error); return (error); }
/*
 * Read the contents of symlink vp into the pathname buffer pnp and
 * NUL-terminate the result.  Returns 0, an error from VOP_READLINK,
 * or ENAMETOOLONG when the target exactly fills the buffer.
 */
int
pn_getsymlink(vnode_t *vp, struct pathname *pnp, cred_t *crp)
{
	struct uio uio;
	struct iovec iov;
	int err;

	pnp->pn_path = pnp->pn_buf;
	iov.iov_base = pnp->pn_path;
	iov.iov_len = pnp->pn_bufsize;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_extflg = UIO_COPY_CACHED;
	uio.uio_resid = pnp->pn_bufsize;

	err = VOP_READLINK(vp, &uio, crp, NULL);
	if (err != 0)
		return (err);

	pnp->pn_pathlen = pnp->pn_bufsize - uio.uio_resid;
	/* A completely full buffer leaves no room for the NUL. */
	if (pnp->pn_pathlen == pnp->pn_bufsize)
		return (ENAMETOOLONG);

	pnp->pn_path[pnp->pn_pathlen] = '\0';
	return (0);
}
/*
 * vnode_iop_readlink - readlink inode operation.
 * Copies the symlink target into the caller's user-space buffer and
 * returns the byte count on success, or a negative Linux errno.
 */
extern int
vnode_iop_readlink(
    DENT_T *dentry,
    char *buf,
    int buflen
)
{
    INODE_T *ip = dentry->d_inode;
    struct uio uio;
    iovec_t iov;
    CALL_DATA_T cd;
    int err;

    /*
     * This routine is not called for shadow objects which need
     * special handling; they're done in shadow_readlink.
     */
    uio.uio_iov = &iov;
    mdki_linux_uioset(&uio, buf, buflen, 0, UIO_USERSPACE);
    mdki_linux_init_call_data(&cd);

    ASSERT_KERNEL_UNLOCKED();
    ASSERT_I_SEM_NOT_MINE(ip);

    err = VOP_READLINK(ITOV(ip), &uio, &cd);
    /* Convert the vnode-layer errno to a Linux -errno. */
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);

    if (err == 0) {
        /* return count of bytes */
        err = buflen - uio.uio_resid;
    }
    return(err);
}
/*
 * Set up nameidata for a lookup() call and do it.
 *
 * If pubflag is set, this call is done for a lookup operation on the
 * public filehandle. In that case we allow crossing mountpoints and
 * absolute pathnames. However, the caller is expected to check that
 * the lookup result is within the public fs, and deny access if
 * it is not.
 *
 * nfs_namei() clears out garbage fields that namei() might leave garbage.
 * This is mainly ni_vp and ni_dvp when an error occurs, and ni_dvp when no
 * error occurs but the parent was not requested.
 *
 * dirp may be set whether an error is returned or not, and must be
 * released by the caller.
 */
int
nfs_namei(struct nameidata *ndp, struct nfsrv_descript *nfsd,
    fhandle_t *fhp, int len, struct nfssvc_sock *slp,
    struct sockaddr *nam, struct mbuf **mdp,
    caddr_t *dposp, struct vnode **retdirp, int v3,
    struct vattr *retdirattrp, int *retdirattr_retp, int pubflag)
{
	int i, rem;
	struct mbuf *md;
	char *fromcp, *tocp, *cp;
	struct iovec aiov;
	struct uio auio;
	struct vnode *dp;
	int error, rdonly, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	int lockleaf = (cnp->cn_flags & LOCKLEAF) != 0;

	*retdirp = NULL;
	cnp->cn_flags |= NOMACCHECK;
	cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);

	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	for (i = 0; i < len; i++) {
		/* Advance to the next mbuf when the current one is drained. */
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		/* Embedded NUL, or '/' in a non-public path, is rejected. */
		if (*fromcp == '\0' || (!pubflag && *fromcp == '/')) {
			error = EACCES;
			goto out;
		}
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
	/* Skip the XDR padding that rounds the name to a 4-byte boundary. */
	len = nfsm_rndup(len)-len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
			goto out;
	}
	/*
	 * NOTE(review): this early return bypasses the 'out:' cleanup and
	 * appears to leak cnp->cn_pnbuf — confirm against upstream.
	 */
	if (!pubflag && nfs_ispublicfh(fhp))
		return (ESTALE);

	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(fhp, 0, &dp, nfsd, slp, nam, &rdonly);
	if (error)
		goto out;
	if (dp->v_type != VDIR) {
		vput(dp);
		error = ENOTDIR;
		goto out;
	}

	if (rdonly)
		cnp->cn_flags |= RDONLY;

	/*
	 * Set return directory.  Reference to dp is implicitly transfered
	 * to the returned pointer
	 */
	*retdirp = dp;
	if (v3) {
		*retdirattr_retp = VOP_GETATTR(dp, retdirattrp,
			ndp->ni_cnd.cn_cred);
	}

	VOP_UNLOCK(dp, 0);

	if (pubflag) {
		/*
		 * Oh joy. For WebNFS, handle those pesky '%' escapes,
		 * and the 'native path' indicator.
		 */
		cp = uma_zalloc(namei_zone, M_WAITOK);
		fromcp = cnp->cn_pnbuf;
		tocp = cp;
		if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) {
			switch ((unsigned char)*fromcp) {
			case WEBNFS_NATIVE_CHAR:
				/*
				 * 'Native' path for us is the same
				 * as a path according to the NFS spec,
				 * just skip the escape char.
				 */
				fromcp++;
				break;
			/*
			 * More may be added in the future, range 0x80-0xff
			 */
			default:
				error = EIO;
				uma_zfree(namei_zone, cp);
				goto out;
			}
		}
		/*
		 * Translate the '%' escapes, URL-style.
		 */
		while (*fromcp != '\0') {
			if (*fromcp == WEBNFS_ESC_CHAR) {
				/* A '%' must be followed by two hex digits. */
				if (fromcp[1] != '\0' && fromcp[2] != '\0') {
					fromcp++;
					*tocp++ = HEXSTRTOI(fromcp);
					fromcp += 2;
					continue;
				} else {
					error = ENOENT;
					uma_zfree(namei_zone, cp);
					goto out;
				}
			} else
				*tocp++ = *fromcp++;
		}
		*tocp = '\0';
		/* Replace the raw buffer with the decoded one. */
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		cnp->cn_pnbuf = cp;
	}

	ndp->ni_pathlen = (tocp - cnp->cn_pnbuf) + 1;
	ndp->ni_segflg = UIO_SYSSPACE;

	if (pubflag) {
		ndp->ni_rootdir = rootvnode;
		ndp->ni_loopcnt = 0;
		/* Absolute paths are allowed for the public filehandle. */
		if (cnp->cn_pnbuf[0] == '/')
			dp = rootvnode;
	} else {
		cnp->cn_flags |= NOCROSSMOUNT;
	}

	/*
	 * Initialize for scan, set ni_startdir and bump ref on dp again
	 * because lookup() will dereference ni_startdir.
	 */
	cnp->cn_thread = curthread;
	VREF(dp);
	ndp->ni_startdir = dp;

	if (!lockleaf)
		cnp->cn_flags |= LOCKLEAF;
	for (;;) {
		cnp->cn_nameptr = cnp->cn_pnbuf;
		/*
		 * Call lookup() to do the real work.  If an error occurs,
		 * ndp->ni_vp and ni_dvp are left uninitialized or NULL and
		 * we do not have to dereference anything before returning.
		 * In either case ni_startdir will be dereferenced and NULLed
		 * out.
		 */
		error = lookup(ndp);
		if (error)
			break;

		/*
		 * Check for encountering a symbolic link.  Trivial
		 * termination occurs if no symlink encountered.
		 * Note: zfree is safe because error is 0, so we will
		 * not zfree it again when we break.
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if (cnp->cn_flags & (SAVENAME | SAVESTART))
				cnp->cn_flags |= HASBUF;
			else
				uma_zfree(namei_zone, cnp->cn_pnbuf);
			if (ndp->ni_vp && !lockleaf)
				VOP_UNLOCK(ndp->ni_vp, 0);
			break;
		}

		/*
		 * Validate symlink
		 */
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			VOP_UNLOCK(ndp->ni_dvp, 0);
		/* Symlinks are only followed for public (WebNFS) lookups. */
		if (!pubflag) {
			error = EINVAL;
			goto badlink2;
		}

		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			goto badlink2;
		}
		/* Need a scratch buffer only if a path suffix remains. */
		if (ndp->ni_pathlen > 1)
			cp = uma_zalloc(namei_zone, M_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td = NULL;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
		badlink1:
			if (ndp->ni_pathlen > 1)
				uma_zfree(namei_zone, cp);
		badlink2:
			vput(ndp->ni_vp);
			vrele(ndp->ni_dvp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen == 0) {
			error = ENOENT;
			goto badlink1;
		}
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink1;
		}

		/*
		 * Adjust or replace path
		 */
		if (ndp->ni_pathlen > 1) {
			/* Append the unresolved suffix after the target. */
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			uma_zfree(namei_zone, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;

		/*
		 * Cleanup refs for next loop and check if root directory
		 * should replace current directory.  Normally ni_dvp
		 * becomes the new base directory and is cleaned up when
		 * we loop.  Explicitly null pointers after invalidation
		 * to clarify operation.
		 */
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;

		if (cnp->cn_pnbuf[0] == '/') {
			/* Absolute target: restart from the root directory. */
			vrele(ndp->ni_dvp);
			ndp->ni_dvp = ndp->ni_rootdir;
			VREF(ndp->ni_dvp);
		}
		ndp->ni_startdir = ndp->ni_dvp;
		ndp->ni_dvp = NULL;
	}
	if (!lockleaf)
		cnp->cn_flags &= ~LOCKLEAF;

	/*
	 * nfs_namei() guarentees that fields will not contain garbage
	 * whether an error occurs or not.  This allows the caller to track
	 * cleanup state trivially.
	 */
out:
	if (error) {
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		ndp->ni_dvp = NULL;
		ndp->ni_startdir = NULL;
		cnp->cn_flags &= ~HASBUF;
	} else if ((ndp->ni_cnd.cn_flags & (WANTPARENT|LOCKPARENT)) == 0) {
		ndp->ni_dvp = NULL;
	}
	return (error);
}
/*
 * Convert a pathname into a pointer to a vnode.
 *
 * The FOLLOW flag is set when symbolic links are to be followed
 * when they occur at the end of the name translation process.
 * Symbolic links are always followed for all other pathname
 * components other than the last.
 *
 * If the LOCKLEAF flag is set, a locked vnode is returned.
 *
 * The segflg defines whether the name is to be copied from user
 * space or kernel space.
 *
 * Overall outline of namei:
 *
 *	copy in name
 *	get starting directory
 *	while (!done && !error) {
 *		call lookup to search path.
 *		if symbolic link, massage name in buffer and continue
 *	}
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;	/* pointer to file descriptor state */
	char *cp;		/* pointer into pathname argument */
	struct vnode *dp;	/* the directory we are searching */
	struct iovec aiov;	/* uio for reading symbolic links */
	struct uio auio;
	int error, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	struct proc *p = cnp->cn_proc;

	ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred;
#ifdef DIAGNOSTIC
	if (!cnp->cn_cred || !cnp->cn_proc)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif
	fdp = cnp->cn_proc->p_fd;

	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0)
		cnp->cn_pnbuf = pool_get(&namei_pool, PR_WAITOK);
	if (ndp->ni_segflg == UIO_SYSSPACE)
		error = copystr(ndp->ni_dirp, cnp->cn_pnbuf,
		    MAXPATHLEN, &ndp->ni_pathlen);
	else
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
		    MAXPATHLEN, &ndp->ni_pathlen);

	/*
	 * Fail on null pathnames
	 */
	if (error == 0 && ndp->ni_pathlen == 1)
		error = ENOENT;

	if (error) {
		pool_put(&namei_pool, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		return (error);
	}

#ifdef KTRACE
	if (KTRPOINT(cnp->cn_proc, KTR_NAMEI))
		ktrnamei(cnp->cn_proc, cnp->cn_pnbuf);
#endif
#if NSYSTRACE > 0
	if (ISSET(cnp->cn_proc->p_flag, P_SYSTRACE))
		systrace_namei(ndp);
#endif

	/*
	 * Strip trailing slashes, as requested
	 */
	if (cnp->cn_flags & STRIPSLASHES) {
		char *end = cnp->cn_pnbuf + ndp->ni_pathlen - 2;

		cp = end;
		while (cp >= cnp->cn_pnbuf && (*cp == '/'))
			cp--;

		/* Still some remaining characters in the buffer */
		if (cp >= cnp->cn_pnbuf) {
			ndp->ni_pathlen -= (end - cp);
			*(cp + 1) = '\0';
		}
	}

	ndp->ni_loopcnt = 0;

	/*
	 * Get starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL)
		ndp->ni_rootdir = rootvnode;
	/*
	 * Check if starting from root directory or current directory.
	 */
	if (cnp->cn_pnbuf[0] == '/') {
		dp = ndp->ni_rootdir;
		vref(dp);
	} else {
		dp = fdp->fd_cdir;
		vref(dp);
	}
	for (;;) {
		if (!dp->v_mount) {
			/* Give up if the directory is no longer mounted */
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (ENOENT);
		}
		cnp->cn_nameptr = cnp->cn_pnbuf;
		ndp->ni_startdir = dp;
		if ((error = lookup(ndp)) != 0) {
			pool_put(&namei_pool, cnp->cn_pnbuf);
			return (error);
		}
		/*
		 * If not a symbolic link, return search result.
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0)
				pool_put(&namei_pool, cnp->cn_pnbuf);
			else
				cnp->cn_flags |= HASBUF;
			return (0);
		}
		if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
			VOP_UNLOCK(ndp->ni_dvp, 0, p);
		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			break;
		}
		/* Need a scratch buffer only if a path suffix remains. */
		if (ndp->ni_pathlen > 1)
			cp = pool_get(&namei_pool, PR_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_procp = cnp->cn_proc;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
badlink:
			if (ndp->ni_pathlen > 1)
				pool_put(&namei_pool, cp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen == 0) {
			error = ENOENT;
			goto badlink;
		}
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink;
		}
		if (ndp->ni_pathlen > 1) {
			/* Append the unresolved suffix after the target. */
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			pool_put(&namei_pool, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;
		vput(ndp->ni_vp);
		dp = ndp->ni_dvp;
		/*
		 * Check if root directory should replace current directory.
		 */
		if (cnp->cn_pnbuf[0] == '/') {
			vrele(dp);
			dp = ndp->ni_rootdir;
			vref(dp);
		}
	}
	/* Error exit from the symlink loop: release buffer and vnodes. */
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vrele(ndp->ni_dvp);
	vput(ndp->ni_vp);
	ndp->ni_vp = NULL;
	return (error);
}
/*
 * fscache_name_to_fsid: map the name of a cache-directory symlink to
 * a filesystem id.  The link body is a fixed-width hex string that is
 * decoded into an ino64_t.  Returns 0 and sets *fsidp on success.
 */
int
fscache_name_to_fsid(cachefscache_t *cachep, char *namep, ino64_t *fsidp)
{
	char dirname[CFS_FRONTFILE_NAME_SIZE];
	vnode_t *linkvp = NULL;
	struct uio uio;
	struct iovec iov;
	ino64_t nodeid;
	char *sp;
	int digit;
	int i;
	int error;

	/* get the vnode of the name */
	error = VOP_LOOKUP(cachep->c_dirvp, namep, &linkvp, NULL, 0, NULL,
	    kcred, NULL, NULL, NULL);
	if (error)
		goto out;

	/* the vnode had better be a link */
	if (linkvp->v_type != VLNK) {
		error = EINVAL;
		goto out;
	}

	/* read the contents of the link */
	iov.iov_base = dirname;
	iov.iov_len = CFS_FRONTFILE_NAME_SIZE;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_loffset = 0;
	uio.uio_fmode = 0;
	uio.uio_extflg = UIO_COPY_CACHED;
	error = VOP_READLINK(linkvp, &uio, kcred, NULL);
	if (error) {
		cmn_err(CE_WARN, "cachefs: Can't read filesystem cache link");
		goto out;
	}

	/* convert the hex digits of the link body to an ino64_t */
	nodeid = 0;
	sp = dirname;
	for (i = 0; i < (CFS_FRONTFILE_NAME_SIZE - 2); i++) {
		digit = *sp++;
		if (digit <= '9')
			digit -= '0';
		else if (digit <= 'F')
			digit = digit - 'A' + 10;
		else
			digit = digit - 'a' + 10;
		nodeid = (nodeid << 4) + digit;
	}
	*fsidp = nodeid;

out:
	if (linkvp)
		VN_RELE(linkvp);
	return (error);
}