Пример #1
0
/*
 * vop_compat_resolve { struct nchandle *a_nch, struct vnode *dvp }
 * XXX STOPGAP FUNCTION
 *
 * XXX OLD API ROUTINE!  WHEN ALL VFSs HAVE BEEN CLEANED UP THIS PROCEDURE
 * WILL BE REMOVED.  This procedure exists for all VFSs which have not
 * yet implemented VOP_NRESOLVE().  It converts VOP_NRESOLVE() into a 
 * vop_old_lookup() and does appropriate translations.
 *
 * Resolve a ncp for VFSs which do not support the VOP.  Eventually all
 * VFSs will support this VOP and this routine can be removed, since
 * VOP_NRESOLVE() is far less complex than the older LOOKUP/CACHEDLOOKUP
 * API.
 *
 * A locked ncp is passed in to be resolved.  The NCP is resolved by
 * figuring out the vnode (if any) and calling cache_setvp() to attach the
 * vnode to the entry.  If the entry represents a non-existent node then
 * cache_setvp() is called with a NULL vnode to resolve the entry into a
 * negative cache entry.  No vnode locks are retained and the
 * ncp is left locked on return.
 *
 * The ncp will NEVER represent "", "." or "..", or contain any slashes.
 *
 * There is a potential directory and vnode interlock.   The lock order
 * requirement is: namecache, governing directory, resolved vnode.
 */
int
vop_compat_nresolve(struct vop_nresolve_args *ap)
{
	int error;
	struct vnode *dvp;
	struct vnode *vp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct componentname cnp;

	nch = ap->a_nch;	/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	/*
	 * UFS currently stores all sorts of side effects, including a loop
	 * variable, in the directory inode.  That needs to be fixed and the
	 * other VFS's audited before we can switch to LK_SHARED.
	 *
	 * A vget() failure here is mapped to EAGAIN so the namecache layer
	 * can retry the resolve.
	 */
	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		kprintf("[diagnostic] vop_compat_resolve: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Build a throw-away componentname describing a plain LOOKUP of
	 * the ncp's name for the old-API vop_old_lookup() call.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_LOOKUP;
	cnp.cn_flags = 0;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = curthread; /* XXX */

	/*
	 * vop_old_lookup() always returns vp locked.  dvp may or may not be
	 * left locked depending on CNP_PDIRUNLOCK.
	 */
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0)
		vn_unlock(vp);
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* was resolved by another process while we were unlocked */
		if (error == 0)
			vrele(vp);
	} else if (error == 0) {
		/* positive hit: attach the vnode, then drop our reference */
		KKASSERT(vp != NULL);
		cache_setvp(nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		/* negative hit: record the non-existence in the namecache */
		KKASSERT(vp == NULL);
		if (cnp.cn_flags & CNP_ISWHITEOUT)
			ncp->nc_flag |= NCF_WHITEOUT;
		cache_setvp(nch, NULL);
	}
	/* drop the directory reference obtained by vget() */
	vrele(dvp);
	return (error);
}
Пример #2
0
/*
 * tmpfs nresolve VOP: resolve a namecache entry against directory dvp.
 *
 * Looks the name up in the tmpfs directory.  On a hit a vnode is
 * allocated/acquired for the node and entered into the namecache; on a
 * miss a negative namecache entry is created (error ENOENT).
 */
static int
tmpfs_nresolve(struct vop_nresolve_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = NULL;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_node *tnode;
	struct mount *mp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *dnode;
	int error;

	mp = dvp->v_mount;

	dnode = VP_TO_TMPFS_DIR(dvp);

	/* shared lock is sufficient for a read-only directory scan */
	TMPFS_NODE_LOCK_SH(dnode);
	de = tmpfs_dir_lookup(dnode, NULL, ncp);
	if (de == NULL) {
		error = ENOENT;
	} else {
		/*
		 * Allocate a vnode for the node we found.
		 */
		tnode = de->td_node;
		error = tmpfs_alloc_vp(dvp->v_mount, tnode,
				       LK_EXCLUSIVE | LK_RETRY, &vp);
		if (error)
			goto out;
		KKASSERT(vp);
	}

out:
	/* reached both via goto on allocation failure and by fall-through */
	TMPFS_NODE_UNLOCK(dnode);

	/* lazily set ACCESSED; take the exclusive lock only when needed */
	if ((dnode->tn_status & TMPFS_NODE_ACCESSED) == 0) {
		TMPFS_NODE_LOCK(dnode);
		dnode->tn_status |= TMPFS_NODE_ACCESSED;
		TMPFS_NODE_UNLOCK(dnode);
	}

	/*
	 * Store the result of this lookup in the cache.  Avoid this if the
	 * request was for creation, as it does not improve timings on
	 * empirical tests.
	 *
	 * NOTE(review): the code below does not actually test for a
	 * creation request; the caveat above appears stale — confirm.
	 */
	if (vp) {
		vn_unlock(vp);
		cache_setvp(v->a_nch, vp);
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(v->a_nch, NULL);
	}
	return (error);
}
Пример #3
0
/*
 * devfs mkdir VOP: create a user-created directory node under dvp.
 *
 * Only succeeds when dvp is the devfs root or a directory.  On success
 * the new vnode is stored in *ap->a_vpp and entered into the namecache;
 * otherwise ENOTDIR (or ENOENT for an inaccessible parent) is returned.
 */
static int
devfs_vop_nmkdir(struct vop_nmkdir_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;

	/*
	 * Initialize the result pointer up-front.  The original code could
	 * reach 'out' without ever storing to *ap->a_vpp (when dnode is
	 * neither the root nor a directory), making the return expression
	 * read an indeterminate caller-supplied value.
	 */
	*ap->a_vpp = NULL;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Ndir,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
Пример #4
0
/*
 * devfs symlink VOP: create a user-created symlink node under dvp.
 *
 * Only succeeds when dvp is the devfs root or a directory.  The link
 * target string is copied into the new node.  On success the new vnode
 * is stored in *ap->a_vpp and entered into the namecache; otherwise
 * ENOTDIR (or ENOENT for an inaccessible parent) is returned.
 */
static int
devfs_vop_nsymlink(struct vop_nsymlink_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node;
	size_t targetlen;

	/*
	 * Initialize the result pointer up-front.  The original code could
	 * reach 'out' without ever storing to *ap->a_vpp (when dnode is
	 * neither the root nor a directory), making the return expression
	 * read an indeterminate caller-supplied value.
	 */
	*ap->a_vpp = NULL;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	ap->a_vap->va_type = VLNK;

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir))
		goto out;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);
	devfs_allocvp(ap->a_dvp->v_mount, ap->a_vpp, Nlink,
		      ap->a_nch->ncp->nc_name, dnode, NULL);

	targetlen = strlen(ap->a_target);
	if (*ap->a_vpp) {
		node = DEVFS_NODE(*ap->a_vpp);
		node->flags |= DEVFS_USER_CREATED;
		/* store a NUL-terminated private copy of the link target */
		node->symlink_namelen = targetlen;
		node->symlink_name = kmalloc(targetlen + 1, M_DEVFS, M_WAITOK);
		memcpy(node->symlink_name, ap->a_target, targetlen);
		node->symlink_name[targetlen] = '\0';
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *ap->a_vpp);
	}
	lockmgr(&devfs_lock, LK_RELEASE);
out:
	return ((*ap->a_vpp == NULL) ? ENOTDIR : 0);
}
Пример #5
0
/*
 * puffs mknod VOP: forward a MKNOD request to the userspace file server
 * and, on success, instantiate a kernel vnode for the new node.
 *
 * Only VFIFO nodes are supported (all other va_type values get EINVAL).
 * Returns EOPNOTSUPP if the server does not implement MKNOD and EAGAIN
 * if the directory vnode cannot be acquired.
 */
static int
puffs_vnop_mknod(struct vop_nmknod_args *ap)
{
    PUFFS_MSG_VARS(vn, mknod);
    struct vnode *dvp = ap->a_dvp;
    struct vattr *vap = ap->a_vap;
    struct puffs_node *dpn = VPTOPP(dvp);
    struct nchandle *nch = ap->a_nch;
    struct namecache *ncp = nch->ncp;
    struct ucred *cred = ap->a_cred;
    struct mount *mp = dvp->v_mount;
    struct puffs_mount *pmp = MPTOPUFFSMP(mp);
    int error;

    if (!EXISTSOP(pmp, MKNOD))
        return EOPNOTSUPP;

    DPRINTF(("puffs_mknod: dvp %p, name: %s\n",
             dvp, ncp->nc_name));

    if (vap->va_type != VFIFO)
        return EINVAL;

    /* lock+ref the directory; failure is reported as EAGAIN */
    if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
        DPRINTF(("puffs_vnop_mknod: EAGAIN on ncp %p %s\n",
                 ncp, ncp->nc_name));
        return EAGAIN;
    }

    /* build the request message and send it to the userspace server */
    PUFFS_MSG_ALLOC(vn, mknod);
    puffs_makecn(&mknod_msg->pvnr_cn, &mknod_msg->pvnr_cn_cred,
                 ncp, cred);
    mknod_msg->pvnr_va = *ap->a_vap;
    puffs_msg_setinfo(park_mknod, PUFFSOP_VN,
                      PUFFS_VN_MKNOD, VPTOPNC(dvp));

    PUFFS_MSG_ENQUEUEWAIT2(pmp, park_mknod, dvp->v_data, NULL, error);

    error = checkerr(pmp, error, __func__);
    if (error)
        goto out;

    /* create the kernel-side node for the cookie the server returned */
    error = puffs_newnode(mp, dvp, ap->a_vpp,
                          mknod_msg->pvnr_newnode, vap->va_type);
    if (error)
        /* local failure: abort the half-created node on the server side */
        puffs_abortbutton(pmp, PUFFS_ABORT_MKNOD, dpn->pn_cookie,
                          mknod_msg->pvnr_newnode, ncp, cred);

out:
    /* release the lock and reference taken by vget() */
    vput(dvp);
    if (!error) {
        cache_setunresolved(nch);
        cache_setvp(nch, *ap->a_vpp);
    }
    PUFFS_MSG_RELEASE(mknod);
    return error;
}
Пример #6
0
/*
 * puffs link VOP: forward a LINK request (hard link vp into dvp under
 * the ncp's name) to the userspace file server.
 *
 * Returns EOPNOTSUPP if the server does not implement LINK, EXDEV for a
 * cross-mount link, and EAGAIN if the directory vnode cannot be acquired.
 * On success the namecache entry is bound to the existing vnode vp.
 */
static int
puffs_vnop_link(struct vop_nlink_args *ap)
{
    PUFFS_MSG_VARS(vn, link);
    struct vnode *dvp = ap->a_dvp;
    struct vnode *vp = ap->a_vp;
    struct puffs_node *dpn = VPTOPP(dvp);
    struct puffs_node *pn = VPTOPP(vp);
    struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
    struct nchandle *nch = ap->a_nch;
    struct namecache *ncp = nch->ncp;
    struct ucred *cred = ap->a_cred;
    int error;

    if (!EXISTSOP(pmp, LINK))
        return EOPNOTSUPP;

    /* hard links may not cross mounts */
    if (vp->v_mount != dvp->v_mount)
        return EXDEV;

    /* lock+ref the directory; failure is reported as EAGAIN */
    if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
        DPRINTF(("puffs_vnop_link: EAGAIN on ncp %p %s\n",
                 ncp, ncp->nc_name));
        return EAGAIN;
    }

    /* build the request: target cookie + component name + credentials */
    PUFFS_MSG_ALLOC(vn, link);
    link_msg->pvnr_cookie_targ = VPTOPNC(vp);
    puffs_makecn(&link_msg->pvnr_cn, &link_msg->pvnr_cn_cred,
                 ncp, cred);
    puffs_msg_setinfo(park_link, PUFFSOP_VN,
                      PUFFS_VN_LINK, VPTOPNC(dvp));

    puffs_msg_enqueue(pmp, park_link);
    error = puffs_msg_wait2(pmp, park_link, dpn, pn);

    PUFFS_MSG_RELEASE(link);

    error = checkerr(pmp, error, __func__);

    /*
     * XXX: stay in touch with the cache.  I don't like this, but
     * don't have a better solution either.  See also puffs_rename().
     */
    if (error == 0) {
        puffs_updatenode(pn, PUFFS_UPDATECTIME);
    }

    /* release the lock and reference taken by vget() */
    vput(dvp);
    if (error == 0) {
        cache_setunresolved(nch);
        cache_setvp(nch, vp);
    }
    return error;
}
Пример #7
0
/*
 * puffs symlink VOP: forward a SYMLINK request to the userspace file
 * server and, on success, instantiate a kernel vnode (VLNK) for the
 * new node.
 *
 * Returns EOPNOTSUPP if the server does not implement SYMLINK and
 * EAGAIN if the directory vnode cannot be acquired.
 */
static int
puffs_vnop_symlink(struct vop_nsymlink_args *ap)
{
    PUFFS_MSG_VARS(vn, symlink);
    struct vnode *dvp = ap->a_dvp;
    struct puffs_node *dpn = VPTOPP(dvp);
    struct mount *mp = dvp->v_mount;
    struct puffs_mount *pmp = MPTOPUFFSMP(dvp->v_mount);
    struct nchandle *nch = ap->a_nch;
    struct namecache *ncp = nch->ncp;
    struct ucred *cred = ap->a_cred;
    int error;

    if (!EXISTSOP(pmp, SYMLINK))
        return EOPNOTSUPP;

    /* lock+ref the directory; failure is reported as EAGAIN */
    if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
        DPRINTF(("puffs_vnop_symlink: EAGAIN on ncp %p %s\n",
                 ncp, ncp->nc_name));
        return EAGAIN;
    }

    *ap->a_vpp = NULL;

    /* build the request: component name, attributes and link target */
    PUFFS_MSG_ALLOC(vn, symlink);
    puffs_makecn(&symlink_msg->pvnr_cn, &symlink_msg->pvnr_cn_cred,
                 ncp, cred);
    symlink_msg->pvnr_va = *ap->a_vap;
    (void)strlcpy(symlink_msg->pvnr_link, ap->a_target,
                  sizeof(symlink_msg->pvnr_link));
    puffs_msg_setinfo(park_symlink, PUFFSOP_VN,
                      PUFFS_VN_SYMLINK, VPTOPNC(dvp));

    PUFFS_MSG_ENQUEUEWAIT2(pmp, park_symlink, dvp->v_data, NULL, error);

    error = checkerr(pmp, error, __func__);
    if (error)
        goto out;

    /* create the kernel-side node for the cookie the server returned */
    error = puffs_newnode(mp, dvp, ap->a_vpp,
                          symlink_msg->pvnr_newnode, VLNK);
    if (error)
        /* local failure: abort the half-created node on the server side */
        puffs_abortbutton(pmp, PUFFS_ABORT_SYMLINK, dpn->pn_cookie,
                          symlink_msg->pvnr_newnode, ncp, cred);

out:
    /* release the lock and reference taken by vget() */
    vput(dvp);
    PUFFS_MSG_RELEASE(symlink);
    if (!error) {
        cache_setunresolved(nch);
        cache_setvp(nch, *ap->a_vpp);
    }
    return error;
}
Пример #8
0
static int
tmpfs_nsymlink(struct vop_nsymlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	char *target = v->a_target;
	int error;

	vap->va_type = VLNK;
	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, target);
	if (error == 0) {
		tmpfs_knote(*vpp, NOTE_WRITE);
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
	}
	return error;
}
Пример #9
0
static int
tmpfs_ncreate(struct vop_ncreate_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct namecache *ncp = ap->a_nch->ncp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	int error;

	KKASSERT(vap->va_type == VREG || vap->va_type == VSOCK);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(ap->a_nch);
		cache_setvp(ap->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return (error);
}
Пример #10
0
static int
tmpfs_nmkdir(struct vop_nmkdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	KKASSERT(vap->va_type == VDIR);

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	}

	return error;
}
Пример #11
0
/*
 * Resolve a mount point's glue ncp.  This ncp creates the illusion
 * of continuity in the namecache tree by connecting the ncp related to the
 * vnode under the mount to the ncp related to the mount's root vnode.
 *
 * If no error occurred a locked, ref'd ncp is stored in *nch.
 */
int
nlookup_mp(struct mount *mp, struct nchandle *nch)
{
    struct vnode *vp;
    int error;

    error = 0;
    cache_get(&mp->mnt_ncmountpt, nch);
    if (nch->ncp->nc_flag & NCF_UNRESOLVED) {
	/* busy-wait until the mount can be busied */
	while (vfs_busy(mp, 0))
	    ;
	error = VFS_ROOT(mp, &vp);
	vfs_unbusy(mp);
	if (error) {
	    /* drop the lock and ref obtained by cache_get() */
	    cache_put(nch);
	} else {
	    /* attach the root vnode, then drop the ref/lock from VFS_ROOT */
	    cache_setvp(nch, vp);
	    vput(vp);
	}
    }
    return(error);
}
Пример #12
0
static int
tmpfs_nmknod(struct vop_nmknod_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode **vpp = v->a_vpp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	int error;

	if (vap->va_type != VBLK && vap->va_type != VCHR &&
	    vap->va_type != VFIFO) {
		return (EINVAL);
	}

	error = tmpfs_alloc_file(dvp, vpp, vap, ncp, cred, NULL);
	if (error == 0) {
		cache_setunresolved(v->a_nch);
		cache_setvp(v->a_nch, *vpp);
		tmpfs_knote(dvp, NOTE_WRITE);
	}
	return error;
}
Пример #13
0
static int
tmpfs_nrmdir(struct vop_nrmdir_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;
	int error;

	/*
	 * We have to acquire the vp from v->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
	KKASSERT(error == 0);
	vn_unlock(vp);

	/*
	 * Prevalidate so we don't hit an assertion later
	 */
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out;
	}

	tmp = VFS_TO_TMPFS(dvp->v_mount);
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_DIR(vp);

	/* Directories with more than two entries ('.' and '..') cannot be
	 * removed. */
	 if (node->tn_size > 0) {
		 error = ENOTEMPTY;
		 goto out;
	 }

	if ((dnode->tn_flags & APPEND)
	    || (node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* This invariant holds only if we are not trying to remove "..".
	  * We checked for that above so this is safe now. */
	KKASSERT(node->tn_dir.tn_parent == dnode);

	/* Get the directory entry associated with node (vp).  This was
	 * filled by tmpfs_lookup while looking up the entry. */
	de = tmpfs_dir_lookup(dnode, node, ncp);
	KKASSERT(TMPFS_DIRENT_MATCHES(de,
	    ncp->nc_name,
	    ncp->nc_nlen));

	/* Check flags to see if we are allowed to remove the directory. */
	if ((dnode->tn_flags & APPEND) ||
	    node->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}


	/* Detach the directory entry from the directory (dnode). */
	tmpfs_dir_detach(dnode, de);

	/* No vnode should be allocated for this entry from this point */
	TMPFS_NODE_LOCK(node);
	TMPFS_ASSERT_ELOCKED(node);
	TMPFS_NODE_LOCK(dnode);
	TMPFS_ASSERT_ELOCKED(dnode);

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(node->tn_links > 0);
	node->tn_links--;
	node->tn_dir.tn_parent = NULL;
#endif
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
	    TMPFS_NODE_MODIFIED;

#if 0
	/* handled by tmpfs_free_node */
	KKASSERT(dnode->tn_links > 0);
	dnode->tn_links--;
#endif
	dnode->tn_status |= TMPFS_NODE_ACCESSED | \
	    TMPFS_NODE_CHANGED | TMPFS_NODE_MODIFIED;

	TMPFS_NODE_UNLOCK(dnode);
	TMPFS_NODE_UNLOCK(node);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* Release the deleted vnode (will destroy the node, notify
	 * interested parties and clean it from the cache). */

	TMPFS_NODE_LOCK(dnode);
	dnode->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(dnode);
	tmpfs_update(dvp);

	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	tmpfs_knote(dvp, NOTE_WRITE | NOTE_LINK);
	error = 0;

out:
	vrele(vp);

	return error;
}
Пример #14
0
/*
 * Do a generic nlookup.  Note that the passed nd is not nlookup_done()'d
 * on return, even if an error occurs.  If no error occurs or NLC_CREATE
 * is flagged and ENOENT is returned, then the returned nl_nch is always
 * referenced and locked exclusively.
 *
 * WARNING: For any general error other than ENOENT w/NLC_CREATE, the
 *	    the resulting nl_nch may or may not be locked and if locked
 *	    might be locked either shared or exclusive.
 *
 * Intermediate directory elements, including the current directory, require
 * execute (search) permission.  nlookup does not examine the access 
 * permissions on the returned element.
 *
 * If NLC_CREATE is set the last directory must allow node creation,
 * and an error code of 0 will be returned for a non-existent
 * target (not ENOENT).
 *
 * If NLC_RENAME_DST is set the last directory must allow node deletion,
 * plus the sticky check is made, and an error code of 0 will be returned
 * for a non-existent target (not ENOENT).
 *
 * If NLC_DELETE is set the last directory must allow node deletion,
 * plus the sticky check is made.
 *
 * If NLC_REFDVP is set nd->nl_dvp will be set to the directory vnode
 * of the returned entry.  The vnode will be referenced, but not locked,
 * and will be released by nlookup_done() along with everything else.
 *
 * NOTE: As an optimization we attempt to obtain a shared namecache lock
 *	 on any intermediate elements.  On success, the returned element
 *	 is ALWAYS locked exclusively.
 */
int
nlookup(struct nlookupdata *nd)
{
    globaldata_t gd = mycpu;
    struct nlcomponent nlc;
    struct nchandle nch;
    struct nchandle par;
    struct nchandle nctmp;
    struct mount *mp;
    struct vnode *hvp;		/* hold to prevent recyclement */
    int wasdotordotdot;
    char *ptr;
    char *nptr;
    int error;
    int len;
    int dflags;
    int hit = 1;
    int saveflag = nd->nl_flags & ~NLC_NCDIR;
    boolean_t doretry = FALSE;
    boolean_t inretry = FALSE;

nlookup_start:
#ifdef KTRACE
    if (KTRPOINT(nd->nl_td, KTR_NAMEI))
	ktrnamei(nd->nl_td->td_lwp, nd->nl_path);
#endif
    bzero(&nlc, sizeof(nlc));

    /*
     * Setup for the loop.  The current working namecache element is
     * always at least referenced.  We lock it as required, but always
     * return a locked, resolved namecache entry.
     */
    nd->nl_loopcnt = 0;
    if (nd->nl_dvp) {
	vrele(nd->nl_dvp);
	nd->nl_dvp = NULL;
    }
    ptr = nd->nl_path;

    /*
     * Loop on the path components.  At the top of the loop nd->nl_nch
     * is ref'd and unlocked and represents our current position.
     */
    for (;;) {
	/*
	 * Make sure nl_nch is locked so we can access the vnode, resolution
	 * state, etc.
	 */
	if ((nd->nl_flags & NLC_NCPISLOCKED) == 0) {
		nd->nl_flags |= NLC_NCPISLOCKED;
		cache_lock_maybe_shared(&nd->nl_nch, wantsexcllock(nd, ptr));
	}

	/*
	 * Check if the root directory should replace the current
	 * directory.  This is done at the start of a translation
	 * or after a symbolic link has been found.  In other cases
	 * ptr will never be pointing at a '/'.
	 */
	if (*ptr == '/') {
	    do {
		++ptr;
	    } while (*ptr == '/');
	    cache_unlock(&nd->nl_nch);
	    cache_get_maybe_shared(&nd->nl_rootnch, &nch,
				   wantsexcllock(nd, ptr));
	    if (nd->nl_flags & NLC_NCDIR) {
		    cache_drop_ncdir(&nd->nl_nch);
		    nd->nl_flags &= ~NLC_NCDIR;
	    } else {
		    cache_drop(&nd->nl_nch);
	    }
	    nd->nl_nch = nch;		/* remains locked */

	    /*
	     * Fast-track termination.  There is no parent directory of
	     * the root in the same mount from the point of view of
	     * the caller so return EACCES if NLC_REFDVP is specified,
	     * and EEXIST if NLC_CREATE is also specified.
	     * e.g. 'rmdir /' or 'mkdir /' are not allowed.
	     */
	    if (*ptr == 0) {
		if (nd->nl_flags & NLC_REFDVP)
			error = (nd->nl_flags & NLC_CREATE) ? EEXIST : EACCES;
		else
			error = 0;
		break;
	    }
	    continue;
	}

	/*
	 * Pre-calculate next path component so we can check whether the
	 * current component directory is the last directory in the path
	 * or not.
	 */
	for (nptr = ptr; *nptr && *nptr != '/'; ++nptr)
		;

	/*
	 * Check directory search permissions (nd->nl_nch is locked & refd).
	 * This will load dflags to obtain directory-special permissions to
	 * be checked along with the last component.
	 *
	 * We only need to pass-in &dflags for the second-to-last component.
	 * Optimize by passing-in NULL for any prior components, which may
	 * allow the code to bypass the naccess() call.
	 */
	dflags = 0;
	if (*nptr == '/')
	    error = naccess(&nd->nl_nch, NLC_EXEC, nd->nl_cred, NULL);
	else
	    error = naccess(&nd->nl_nch, NLC_EXEC, nd->nl_cred, &dflags);
	if (error) {
	    if (keeperror(nd, error))
		    break;
	    error = 0;
	}

	/*
	 * Extract the next (or last) path component.  Path components are
	 * limited to 255 characters.
	 */
	nlc.nlc_nameptr = ptr;
	nlc.nlc_namelen = nptr - ptr;
	ptr = nptr;
	if (nlc.nlc_namelen >= 256) {
	    error = ENAMETOOLONG;
	    break;
	}

	/*
	 * Lookup the path component in the cache, creating an unresolved
	 * entry if necessary.  We have to handle "." and ".." as special
	 * cases.
	 *
	 * When handling ".." we have to detect a traversal back through a
	 * mount point.   If we are at the root, ".." just returns the root.
	 *
	 * When handling "." or ".." we also have to recalculate dflags
	 * since our dflags will be for some sub-directory instead of the
	 * parent dir.
	 *
	 * This subsection returns a locked, refd 'nch' unless it errors out,
	 * and an unlocked but still ref'd nd->nl_nch.
	 *
	 * The namecache topology is not allowed to be disconnected, so 
	 * encountering a NULL parent will generate EINVAL.  This typically
	 * occurs when a directory is removed out from under a process.
	 *
	 * WARNING! The unlocking of nd->nl_nch is sensitive code.
	 */
	KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);

	if (nlc.nlc_namelen == 1 && nlc.nlc_nameptr[0] == '.') {
	    cache_unlock(&nd->nl_nch);
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    cache_get_maybe_shared(&nd->nl_nch, &nch, wantsexcllock(nd, ptr));
	    wasdotordotdot = 1;
	} else if (nlc.nlc_namelen == 2 && 
		   nlc.nlc_nameptr[0] == '.' && nlc.nlc_nameptr[1] == '.') {
	    if (nd->nl_nch.mount == nd->nl_rootnch.mount &&
		nd->nl_nch.ncp == nd->nl_rootnch.ncp
	    ) {
		/*
		 * ".." at the root returns the root
		 */
		cache_unlock(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCPISLOCKED;
		cache_get_maybe_shared(&nd->nl_nch, &nch,
				       wantsexcllock(nd, ptr));
	    } else {
		/*
		 * Locate the parent ncp.  If we are at the root of a
		 * filesystem mount we have to skip to the mounted-on
		 * point in the underlying filesystem.
		 *
		 * Expect the parent to always be good since the
		 * mountpoint doesn't go away.  XXX hack.  cache_get()
		 * requires the ncp to already have a ref as a safety.
		 *
		 * However, a process which has been broken out of a chroot
		 * will wind up with a NULL parent if it tries to '..' above
		 * the real root, deal with the case.  Note that this does
		 * not protect us from a jail breakout, it just stops a panic
		 * if the jail-broken process tries to '..' past the real
		 * root.
		 */
		nctmp = nd->nl_nch;
		while (nctmp.ncp == nctmp.mount->mnt_ncmountpt.ncp) {
			nctmp = nctmp.mount->mnt_ncmounton;
			if (nctmp.ncp == NULL)
				break;
		}
		if (nctmp.ncp == NULL) {
			if (curthread->td_proc) {
				kprintf("vfs_nlookup: '..' traverse broke "
					"jail: pid %d (%s)\n",
					curthread->td_proc->p_pid,
					curthread->td_comm);
			}
			nctmp = nd->nl_rootnch;
		} else {
			nctmp.ncp = nctmp.ncp->nc_parent;
		}
		cache_hold(&nctmp);
		cache_unlock(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCPISLOCKED;
		cache_get_maybe_shared(&nctmp, &nch, wantsexcllock(nd, ptr));
		cache_drop(&nctmp);		/* NOTE: zero's nctmp */
	    }
	    wasdotordotdot = 2;
	} else {
	    /*
	     * Must unlock nl_nch when traversing down the path.  However,
	     * the child ncp has not yet been found/created and the parent's
	     * child list might be empty.  Thus releasing the lock can
	     * allow a race whereby the parent ncp's vnode is recycled.
	     * This case can occur especially when maxvnodes is set very low.
	     *
	     * We need the parent's ncp to remain resolved for all normal
	     * filesystem activities, so we vhold() the vp during the lookup
	     * to prevent recyclement due to vnlru / maxvnodes.
	     *
	     * If we race an unlink or rename the ncp might be marked
	     * DESTROYED after resolution, requiring a retry.
	     */
	    if ((hvp = nd->nl_nch.ncp->nc_vp) != NULL)
		vhold(hvp);
	    cache_unlock(&nd->nl_nch);
	    nd->nl_flags &= ~NLC_NCPISLOCKED;
	    error = cache_nlookup_maybe_shared(&nd->nl_nch, &nlc,
					       wantsexcllock(nd, ptr), &nch);
	    if (error == EWOULDBLOCK) {
		    nch = cache_nlookup(&nd->nl_nch, &nlc);
		    if (nch.ncp->nc_flag & NCF_UNRESOLVED)
			hit = 0;
		    for (;;) {
			error = cache_resolve(&nch, nd->nl_cred);
			if (error != EAGAIN &&
			    (nch.ncp->nc_flag & NCF_DESTROYED) == 0) {
				if (error == ESTALE) {
				    if (!inretry)
					error = ENOENT;
				    doretry = TRUE;
				}
				break;
			}
			kprintf("[diagnostic] nlookup: relookup %*.*s\n",
				nch.ncp->nc_nlen, nch.ncp->nc_nlen,
				nch.ncp->nc_name);
			cache_put(&nch);
			nch = cache_nlookup(&nd->nl_nch, &nlc);
		    }
	    }
	    if (hvp)
		vdrop(hvp);
	    wasdotordotdot = 0;
	}

	/*
	 * If the last component was "." or ".." our dflags no longer
	 * represents the parent directory and we have to explicitly
	 * look it up.
	 *
	 * Expect the parent to be good since nch is locked.
	 */
	if (wasdotordotdot && error == 0) {
	    dflags = 0;
	    if ((par.ncp = nch.ncp->nc_parent) != NULL) {
		par.mount = nch.mount;
		cache_hold(&par);
		cache_lock_maybe_shared(&par, wantsexcllock(nd, ptr));
		error = naccess(&par, 0, nd->nl_cred, &dflags);
		cache_put(&par);
		if (error) {
		    if (!keeperror(nd, error))
			    error = 0;
		}
	    }
	}

	/*
	 * [end of subsection]
	 *
	 * nch is locked and referenced.
	 * nd->nl_nch is unlocked and referenced.
	 *
	 * nl_nch must be unlocked or we could chain lock to the root
	 * if a resolve gets stuck (e.g. in NFS).
	 */
	KKASSERT((nd->nl_flags & NLC_NCPISLOCKED) == 0);

	/*
	 * Resolve the namespace if necessary.  The ncp returned by
	 * cache_nlookup() is referenced and locked.
	 *
	 * XXX neither '.' nor '..' should return EAGAIN since they were
	 * previously resolved and thus cannot be newly created ncp's.
	 */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED) {
	    hit = 0;
	    error = cache_resolve(&nch, nd->nl_cred);
	    if (error == ESTALE) {
		if (!inretry)
		    error = ENOENT;
		doretry = TRUE;
	    }
	    KKASSERT(error != EAGAIN);
	} else {
	    error = nch.ncp->nc_error;
	}

	/*
	 * Early completion.  ENOENT is not an error if this is the last
	 * component and NLC_CREATE or NLC_RENAME (rename target) was
	 * requested.  Note that ncp->nc_error is left as ENOENT in that
	 * case, which we check later on.
	 *
	 * Also handle invalid '.' or '..' components terminating a path
	 * for a create/rename/delete.  The standard requires this and pax
	 * pretty stupidly depends on it.
	 */
	if (islastelement(ptr)) {
	    if (error == ENOENT &&
		(nd->nl_flags & (NLC_CREATE | NLC_RENAME_DST))
	    ) {
		if (nd->nl_flags & NLC_NFS_RDONLY) {
			error = EROFS;
		} else {
			error = naccess(&nch, nd->nl_flags | dflags,
					nd->nl_cred, NULL);
		}
	    }
	    if (error == 0 && wasdotordotdot &&
		(nd->nl_flags & (NLC_CREATE | NLC_DELETE |
				 NLC_RENAME_SRC | NLC_RENAME_DST))) {
		/*
		 * POSIX junk
		 */
		if (nd->nl_flags & NLC_CREATE)
			error = EEXIST;
		else if (nd->nl_flags & NLC_DELETE)
			error = (wasdotordotdot == 1) ? EINVAL : ENOTEMPTY;
		else
			error = EINVAL;
	    }
	}

	/*
	 * Early completion on error.
	 */
	if (error) {
	    cache_put(&nch);
	    break;
	}

	/*
	 * If the element is a symlink and it is either not the last
	 * element or it is the last element and we are allowed to
	 * follow symlinks, resolve the symlink.
	 */
	if ((nch.ncp->nc_flag & NCF_ISSYMLINK) &&
	    (*ptr || (nd->nl_flags & NLC_FOLLOW))
	) {
	    if (nd->nl_loopcnt++ >= MAXSYMLINKS) {
		error = ELOOP;
		cache_put(&nch);
		break;
	    }
	    error = nreadsymlink(nd, &nch, &nlc);
	    cache_put(&nch);
	    if (error)
		break;

	    /*
	     * Concatenate trailing path elements onto the returned symlink.
	     * Note that if the path component (ptr) is not exhausted, it
	     * will begin with a '/', so we do not have to add another one.
	     *
	     * The symlink may not be empty.
	     */
	    len = strlen(ptr);
	    if (nlc.nlc_namelen == 0 || nlc.nlc_namelen + len >= MAXPATHLEN) {
		error = nlc.nlc_namelen ? ENAMETOOLONG : ENOENT;
		objcache_put(namei_oc, nlc.nlc_nameptr);
		break;
	    }
	    bcopy(ptr, nlc.nlc_nameptr + nlc.nlc_namelen, len + 1);
	    if (nd->nl_flags & NLC_HASBUF)
		objcache_put(namei_oc, nd->nl_path);
	    nd->nl_path = nlc.nlc_nameptr;
	    nd->nl_flags |= NLC_HASBUF;
	    ptr = nd->nl_path;

	    /*
	     * Go back up to the top to resolve any initial '/'s in the
	     * symlink.
	     */
	    continue;
	}
	
	/*
	 * If the element is a directory and we are crossing a mount point,
	 * Locate the mount.
	 */
	while ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && 
	    (nd->nl_flags & NLC_NOCROSSMOUNT) == 0 &&
	    (mp = cache_findmount(&nch)) != NULL
	) {
	    struct vnode *tdp;
	    int vfs_do_busy = 0;

	    /*
	     * VFS must be busied before the namecache entry is locked,
	     * but we don't want to waste time calling vfs_busy() if the
	     * mount point is already resolved.
	     */
again:
	    cache_put(&nch);
	    if (vfs_do_busy) {
		while (vfs_busy(mp, 0)) {
		    if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
			kprintf("nlookup: warning umount race avoided\n");
			cache_dropmount(mp);
			error = EBUSY;
			vfs_do_busy = 0;
			goto double_break;
		    }
		}
	    }
	    cache_get_maybe_shared(&mp->mnt_ncmountpt, &nch,
				   wantsexcllock(nd, ptr));

	    if (nch.ncp->nc_flag & NCF_UNRESOLVED) {
		if (vfs_do_busy == 0) {
		    vfs_do_busy = 1;
		    goto again;
		}
		error = VFS_ROOT(mp, &tdp);
		vfs_unbusy(mp);
		vfs_do_busy = 0;
		if (keeperror(nd, error)) {
		    cache_dropmount(mp);
		    break;
		}
		if (error == 0) {
		    cache_setvp(&nch, tdp);
		    vput(tdp);
		}
	    }
	    if (vfs_do_busy)
		vfs_unbusy(mp);
	    cache_dropmount(mp);
	}

	if (keeperror(nd, error)) {
	    cache_put(&nch);
double_break:
	    break;
	}
	    
	/*
	 * Skip any slashes to get to the next element.  If there 
	 * are any slashes at all the current element must be a
	 * directory or, in the create case, intended to become a directory.
	 * If it isn't we break without incrementing ptr and fall through
	 * to the failure case below.
	 */
	while (*ptr == '/') {
	    if ((nch.ncp->nc_flag & NCF_ISDIR) == 0 && 
		!(nd->nl_flags & NLC_WILLBEDIR)
	    ) {
		break;
	    }
	    ++ptr;
	}

	/*
	 * Continuation case: additional elements and the current
	 * element is a directory.
	 */
	if (*ptr && (nch.ncp->nc_flag & NCF_ISDIR)) {
	    if (nd->nl_flags & NLC_NCDIR) {
		    cache_drop_ncdir(&nd->nl_nch);
		    nd->nl_flags &= ~NLC_NCDIR;
	    } else {
		    cache_drop(&nd->nl_nch);
	    }
	    cache_unlock(&nch);
	    KKASSERT((nd->nl_flags & NLC_NCPISLOCKED) == 0);
	    nd->nl_nch = nch;
	    continue;
	}

	/*
	 * Failure case: additional elements and the current element
	 * is not a directory
	 */
	if (*ptr) {
	    cache_put(&nch);
	    error = ENOTDIR;
	    break;
	}

	/*
	 * Successful lookup of last element.
	 *
	 * Check permissions if the target exists.  If the target does not
	 * exist directory permissions were already tested in the early
	 * completion code above.
	 *
	 * nd->nl_flags will be adjusted on return with NLC_APPENDONLY
	 * if the file is marked append-only, and NLC_STICKY if the directory
	 * containing the file is sticky.
	 */
	if (nch.ncp->nc_vp && (nd->nl_flags & NLC_ALLCHKS)) {
	    error = naccess(&nch, nd->nl_flags | dflags,
			    nd->nl_cred, NULL);
	    if (keeperror(nd, error)) {
		cache_put(&nch);
		break;
	    }
	}

	/*
	 * Termination: no more elements.
	 *
	 * If NLC_REFDVP is set acquire a referenced parent dvp.
	 */
	if (nd->nl_flags & NLC_REFDVP) {
		cache_lock(&nd->nl_nch);
		error = cache_vref(&nd->nl_nch, nd->nl_cred, &nd->nl_dvp);
		cache_unlock(&nd->nl_nch);
		if (keeperror(nd, error)) {
			kprintf("NLC_REFDVP: Cannot ref dvp of %p\n", nch.ncp);
			cache_put(&nch);
			break;
		}
	}
	if (nd->nl_flags & NLC_NCDIR) {
		cache_drop_ncdir(&nd->nl_nch);
		nd->nl_flags &= ~NLC_NCDIR;
	} else {
		cache_drop(&nd->nl_nch);
	}
	nd->nl_nch = nch;
	nd->nl_flags |= NLC_NCPISLOCKED;
	error = 0;
	break;
    }

    if (hit)
	++gd->gd_nchstats->ncs_longhits;
    else
	++gd->gd_nchstats->ncs_longmiss;

    if (nd->nl_flags & NLC_NCPISLOCKED)
	KKASSERT(cache_lockstatus(&nd->nl_nch) > 0);

    /*
     * Retry the whole thing if doretry flag is set, but only once.
     * autofs(5) may mount another filesystem under its root directory
     * while resolving a path.
     *
     * Restore the flags saved on entry, preserving only the current
     * NLC_NCDIR state, before restarting.
     */
    if (doretry && !inretry) {
	inretry = TRUE;
	nd->nl_flags &= NLC_NCDIR;
	nd->nl_flags |= saveflag;
	goto nlookup_start;
    }

    /*
     * NOTE: If NLC_CREATE was set the ncp may represent a negative hit
     * (ncp->nc_error will be ENOENT), but we will still return an error
     * code of 0.
     */
    return(error);
}
Пример #15
0
/*
 * Begin vnode operations.
 *
 * A word from the keymaster about locks: generally we don't want
 * to use the vnode locks at all: it creates an ugly dependency between
 * the userlandia file server and the kernel.  But we'll play along with
 * the kernel vnode locks for now.  However, even currently we attempt
 * to release locks as early as possible.  This is possible for some
 * operations which a) don't need a locked vnode after the userspace op
 * and b) return with the vnode unlocked.  Theoretically we could
 * unlock-do op-lock for others and order the graph in userspace, but I
 * don't want to think of the consequences for the time being.
 */
/*
 * nresolve for puffs: forward the lookup of ncp->nc_name in dvp to the
 * userspace file server via a PUFFS_VN_LOOKUP message and resolve the
 * namecache entry from the reply.  On success the ncp is resolved to
 * the found vnode; ENOENT from userspace produces a negative entry.
 */
static int
puffs_vnop_lookup(struct vop_nresolve_args *ap)
{
    PUFFS_MSG_VARS(vn, lookup);     /* declares park_lookup / lookup_msg */
    struct puffs_mount *pmp = MPTOPUFFSMP(ap->a_dvp->v_mount);
    struct nchandle *nch = ap->a_nch;
    struct namecache *ncp = nch->ncp;
    struct ucred *cred = ap->a_cred;
    struct vnode *vp = NULL, *dvp = ap->a_dvp;
    struct puffs_node *dpn;
    int error;

    DPRINTF(("puffs_lookup: \"%s\", parent vnode %p\n",
             ncp->nc_name, dvp));

    /* Build the lookup request: component name + caller's credentials. */
    PUFFS_MSG_ALLOC(vn, lookup);
    puffs_makecn(&lookup_msg->pvnr_cn, &lookup_msg->pvnr_cn_cred,
                 ncp, cred);

    puffs_msg_setinfo(park_lookup, PUFFSOP_VN,
                      PUFFS_VN_LOOKUP, VPTOPNC(dvp));
    /* Blocks until the userspace server answers (or the mount dies). */
    PUFFS_MSG_ENQUEUEWAIT2(pmp, park_lookup, dvp->v_data, NULL, error);
    DPRINTF(("puffs_lookup: return of the userspace, part %d\n", error));
    if (error) {
        error = checkerr(pmp, error, __func__);
        /* Name does not exist: cache the negative hit. */
        if (error == ENOENT)
            cache_setvp(nch, NULL);
        goto out;
    }

    /*
     * Check that we don't get our parent node back, that would cause
     * a pretty obvious deadlock.
     */
    dpn = VPTOPP(dvp);
    if (lookup_msg->pvnr_newnode == dpn->pn_cookie) {
        puffs_senderr(pmp, PUFFS_ERR_LOOKUP, EINVAL,
                      "lookup produced parent cookie", lookup_msg->pvnr_newnode);
        error = EPROTO;
        goto out;
    }

    /*
     * Map the returned cookie to a vnode; the vn_unlock/vrele in the
     * "out" path suggest it comes back locked and referenced — verify
     * against puffs_cookie2vnode/puffs_getvnode.
     */
    error = puffs_cookie2vnode(pmp, lookup_msg->pvnr_newnode, 1, &vp);
    if (error == PUFFS_NOSUCHCOOKIE) {
        /* First sighting of this cookie: instantiate a fresh vnode. */
        error = puffs_getvnode(dvp->v_mount,
                               lookup_msg->pvnr_newnode, lookup_msg->pvnr_vtype,
                               lookup_msg->pvnr_size, &vp);
        if (error) {
            /* Tell the server the lookup it answered is being dropped. */
            puffs_abortbutton(pmp, PUFFS_ABORT_LOOKUP, VPTOPNC(dvp),
                              lookup_msg->pvnr_newnode, ncp, cred);
            goto out;
        }
    } else if (error) {
        puffs_abortbutton(pmp, PUFFS_ABORT_LOOKUP, VPTOPNC(dvp),
                          lookup_msg->pvnr_newnode, ncp, cred);
        goto out;
    }

out:
    /* Success: resolve the ncp; no vnode locks/refs are retained. */
    if (!error && vp != NULL) {
        vn_unlock(vp);
        cache_setvp(nch, vp);
        vrele(vp);
    }
    DPRINTF(("puffs_lookup: returning %d\n", error));
    PUFFS_MSG_RELEASE(lookup);
    return error;
}
Пример #16
0
/*
 * vop_compat_nremove { struct nchandle *a_nch, 	XXX STOPGAP FUNCTION
 *			struct vnode *a_dvp,
 *			struct ucred *a_cred }
 */
int
vop_compat_nremove(struct vop_nremove_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		/*
		 * Fix: the diagnostic previously said "vop_compat_resolve",
		 * a copy/paste from the nresolve shim; report the actual
		 * function so the message is attributable.
		 */
		kprintf("[diagnostic] vop_compat_nremove: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to delete the entry in the
	 * directory inode.  We expect a return code of 0 for the DELETE
	 * case (meaning that a vp has been found).  The cnp must simulate
	 * a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_DELETE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	/*
	 * The target must NOT be a directory (fixed comment: the old one
	 * said the opposite of what the code checks).  Directories are
	 * removed via the rmdir path, so a VDIR here is EPERM.
	 */
	vp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);
	if (error == 0 && vp->v_type == VDIR)
		error = EPERM;
	if (error == 0) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_REMOVE(dvp, vp, &cnp);
		if (error == 0) {
			/*
			 * Resolve the ncp to a negative hit and destroy
			 * any other namecache entries still referencing
			 * the removed vnode.
			 */
			cache_setunresolved(nch);
			cache_setvp(nch, NULL);
			cache_inval_vp(vp, CINV_DESTROY);
		}
	}
	/* Release the looked-up vnode; vrele only if it aliases dvp. */
	if (vp) {
		if (dvp == vp)
			vrele(vp);
		else
			vput(vp);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}
Пример #17
0
/*
 * vop_compat_nsymlink { struct nchandle *a_nch,	XXX STOPGAP FUNCTION
 *			 struct vnode *a_dvp,
 *			 struct vnode **a_vpp,
 *			 struct ucred *a_cred,
 *			 struct vattr *a_vap,
 *			 char *a_target }
 *
 * Converts VOP_NSYMLINK() into vop_old_lookup()+VOP_OLD_SYMLINK() for
 * VFSs which have not yet implemented the new API.  On success the new
 * symlink vnode is returned via *a_vpp and the locked ncp is resolved.
 */
int
vop_compat_nsymlink(struct vop_nsymlink_args *ap)
{
	struct thread *td = curthread;
	struct componentname cnp;
	struct nchandle *nch;
	struct namecache *ncp;
	struct vnode *dvp;
	struct vnode *vp;
	int error;

	/*
	 * Sanity checks, get a locked directory vnode.
	 */
	*ap->a_vpp = NULL;
	nch = ap->a_nch;		/* locked namecache node */
	ncp = nch->ncp;
	dvp = ap->a_dvp;

	if ((error = vget(dvp, LK_EXCLUSIVE)) != 0) {
		/*
		 * Fix: the diagnostic previously said "vop_compat_resolve",
		 * a copy/paste from the nresolve shim; report the actual
		 * function so the message is attributable.
		 */
		kprintf("[diagnostic] vop_compat_nsymlink: EAGAIN on ncp %p %s\n",
			ncp, ncp->nc_name);
		return(EAGAIN);
	}

	/*
	 * Setup the cnp for a traditional vop_old_lookup() call.  The lookup
	 * caches all information required to create the entry in the
	 * directory inode.  We expect a return code of EJUSTRETURN for
	 * the CREATE case.  The cnp must simulate a saved-name situation.
	 */
	bzero(&cnp, sizeof(cnp));
	cnp.cn_nameiop = NAMEI_CREATE;
	cnp.cn_flags = CNP_LOCKPARENT;
	cnp.cn_nameptr = ncp->nc_name;
	cnp.cn_namelen = ncp->nc_nlen;
	cnp.cn_cred = ap->a_cred;
	cnp.cn_td = td;

	vp = NULL;
	error = vop_old_lookup(ap->a_head.a_ops, dvp, &vp, &cnp);

	/*
	 * EJUSTRETURN should be returned for this case, which means that
	 * the VFS has setup the directory inode for the create.  The dvp we
	 * passed in is expected to remain in a locked state.
	 *
	 * If the VOP_OLD_SYMLINK is successful we are responsible for updating
	 * the cache state of the locked ncp that was passed to us.
	 */
	if (error == EJUSTRETURN) {
		KKASSERT((cnp.cn_flags & CNP_PDIRUNLOCK) == 0);
		error = VOP_OLD_SYMLINK(dvp, &vp, &cnp, ap->a_vap, ap->a_target);
		if (error == 0) {
			cache_setunresolved(nch);
			cache_setvp(nch, vp);
			*ap->a_vpp = vp;
		}
	} else {
		/* Lookup found an existing entry: the symlink must fail. */
		if (error == 0) {
			vput(vp);
			vp = NULL;
			error = EEXIST;
		}
		KKASSERT(vp == NULL);
	}
	if ((cnp.cn_flags & CNP_PDIRUNLOCK) == 0)
		vn_unlock(dvp);
	vrele(dvp);
	return (error);
}
Пример #18
0
/*
 * nremove for tmpfs: unlink the file named by v->a_nch from directory
 * v->a_dvp.  Directories are rejected with EISDIR.  On success the
 * namecache entry is converted to a negative hit and knotes are posted
 * on both the removed file and its directory.
 */
static int
tmpfs_nremove(struct vop_nremove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct namecache *ncp = v->a_nch->ncp;
	struct vnode *vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	/*
	 * We have to acquire the vp from v->a_nch because we will likely
	 * unresolve the namecache entry, and a vrele/vput is needed to
	 * trigger the tmpfs_inactive/tmpfs_reclaim sequence.
	 *
	 * We have to use vget to clear any inactive state on the vnode,
	 * otherwise the vnode may remain inactive and thus tmpfs_inactive
	 * will not get called when we release it.
	 */
	error = cache_vget(v->a_nch, v->a_cred, LK_SHARED, &vp);
	/* NOTE(review): failure is treated as impossible here — confirm. */
	KKASSERT(error == 0);
	vn_unlock(vp);

	/* Directories must be removed via rmdir, not remove. */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	/* Find the directory entry linking (dnode, name) -> node. */
	de = tmpfs_dir_lookup(dnode, node, ncp);
	if (de == NULL) {
		error = ENOENT;
		goto out;
	}

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de);

	/* If other hard links remain, just mark the node's times dirty. */
	if (node->tn_links > 0) {
	        TMPFS_NODE_LOCK(node);
		node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED | \
	                TMPFS_NODE_MODIFIED;
	        TMPFS_NODE_UNLOCK(node);
	}

	/* Turn the ncp into a negative entry and notify watchers. */
	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, NULL);
	tmpfs_knote(vp, NOTE_DELETE);
	/*cache_inval_vp(vp, CINV_DESTROY);*/
	tmpfs_knote(dvp, NOTE_WRITE);
	error = 0;

out:
	/* Drop the ref from cache_vget; may trigger inactive/reclaim. */
	vrele(vp);

	return error;
}
Пример #19
0
/*
 * nlink for tmpfs: create a hard link to v->a_vp under the name held
 * by v->a_nch in directory v->a_dvp.  On success the namecache entry
 * is resolved to vp and NOTE_LINK/NOTE_WRITE knotes are posted.
 *
 * Fix: removed the unused local 'mp' (dead store of dvp->v_mount that
 * was never read).
 */
static int
tmpfs_nlink(struct vop_nlink_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;
	struct namecache *ncp = v->a_nch->ncp;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;
	struct tmpfs_node *dnode;
	int error;

	KKASSERT(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);
	dnode = VP_TO_TMPFS_NODE(dvp);

	/* The directory node stays locked across the whole operation. */
	TMPFS_NODE_LOCK(dnode);

	/* XXX: Why aren't the following two tests done by the caller? */

	/* Hard links of directories are forbidden. */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}

	/* Cannot create cross-device links. */
	if (dvp->v_mount != vp->v_mount) {
		error = EXDEV;
		goto out;
	}

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system.  Note that tn_links == LINK_MAX already fails. */
	KKASSERT(node->tn_links <= LINK_MAX);
	if (node->tn_links >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
				   ncp->nc_name, ncp->nc_nlen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	tmpfs_dir_attach(dnode, de);

	/* vp link count has changed, so update node times. */
	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_CHANGED;
	TMPFS_NODE_UNLOCK(node);
	tmpfs_update(vp);

	tmpfs_knote(vp, NOTE_LINK);
	/* Resolve the ncp to the link target. */
	cache_setunresolved(v->a_nch);
	cache_setvp(v->a_nch, vp);
	error = 0;

out:
	TMPFS_NODE_UNLOCK(dnode);
	if (error == 0)
		tmpfs_knote(dvp, NOTE_WRITE);
	return error;
}
Пример #20
0
static int
devfs_vop_nresolve(struct vop_nresolve_args *ap)
{
	struct devfs_node *dnode = DEVFS_NODE(ap->a_dvp);
	struct devfs_node *node, *found = NULL;
	struct namecache *ncp;
	struct vnode *vp = NULL;
	int error = 0;
	int len;
	int depth;

	ncp = ap->a_nch->ncp;
	len = ncp->nc_nlen;

	if (!devfs_node_is_accessible(dnode))
		return ENOENT;

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	if ((dnode->node_type != Nroot) && (dnode->node_type != Ndir)) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;
	}

	TAILQ_FOREACH(node, DEVFS_DENODE_HEAD(dnode), link) {
		if (len == node->d_dir.d_namlen) {
			if (!memcmp(ncp->nc_name, node->d_dir.d_name, len)) {
				found = node;
				break;
			}
		}
	}

	if (found) {
		depth = 0;
		while ((found->node_type == Nlink) && (found->link_target)) {
			if (depth >= 8) {
				devfs_debug(DEVFS_DEBUG_SHOW, "Recursive link or depth >= 8");
				break;
			}

			found = found->link_target;
			++depth;
		}

		if (!(found->flags & DEVFS_HIDDEN))
			devfs_allocv(/*ap->a_dvp->v_mount, */ &vp, found);
	}

	if (vp == NULL) {
		error = ENOENT;
		cache_setvp(ap->a_nch, NULL);
		goto out;

	}
	KKASSERT(vp);
	vn_unlock(vp);
	cache_setvp(ap->a_nch, vp);
	vrele(vp);
out:
	lockmgr(&devfs_lock, LK_RELEASE);

	return error;
}