Code Example #1
/*
 * Do operations associated with quotas
 */
int
ulfs_quotactl(struct mount *mp, struct quotactl_args *args)
{

#if !defined(LFS_QUOTA) && !defined(LFS_QUOTA2)
	(void) mp;
	(void) args;
	return (EOPNOTSUPP);
#else
	struct lwp *l = curlwp;
	int error;

	/* Mark the mount busy, as we're passing it to kauth(9). */
	error = vfs_busy(mp, NULL);
	if (error) {
		return (error);
	}
	mutex_enter(&mp->mnt_updating);

	error = lfsquota_handle_cmd(mp, l, args);

	mutex_exit(&mp->mnt_updating);
	vfs_unbusy(mp, false, NULL);
	return (error);
#endif
}
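Every example on this page is built around the same bracket: vfs_busy() pins the mount so it cannot be unmounted out from under the caller, the real work is done, and vfs_unbusy() drops the reference again. Below is a minimal sketch of that skeleton, assuming the NetBSD-style signatures used in Code Example #1 (vfs_busy(mp, NULL) and vfs_unbusy(mp, false, NULL)); the helper name with_busy_mount() and the operation_on_mount() callback it wraps are hypothetical.

static int
with_busy_mount(struct mount *mp, int (*operation_on_mount)(struct mount *))
{
	int error;

	/* Pin the mount; this fails if an unmount is in progress. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;

	/* The mount cannot go away until vfs_unbusy() is called. */
	error = (*operation_on_mount)(mp);

	/* Drop the busy reference. */
	vfs_unbusy(mp, false, NULL);
	return error;
}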
Code Example #2
/*
 * Do a lazy sync of the filesystem.
 */
int
sync_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
		vfs_unbusy(mp);
	}

	return (0);
}
Code Example #3
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	caddr_t base;
	int sleepreturn = 0;

	base = mfsp->mfs_baseoff;
	while (mfsp->mfs_buflist != (struct buf *)-1) {
		while ((bp = mfsp->mfs_buflist) != NULL) {
			mfsp->mfs_buflist = bp->b_actf;
			mfs_doio(bp, base);
			wakeup((caddr_t)bp);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp, 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
Code Example #4
File: fs_fs.c  Project: dzavalishin/oskit
static OSKIT_COMDECL filesystem_sync(oskit_filesystem_t *f,
				    oskit_bool_t wait)
{
    struct gfilesystem *fs = (struct gfilesystem *) f; 
    struct mount *mp;
    struct proc *p;
    oskit_error_t ferror;
    int error, asyncflag;

    
    if (!fs || !fs->count || !fs->mp)
	    return OSKIT_E_INVALIDARG;

    ferror = getproc(&p);
    if (ferror)
	    return ferror;

    mp = fs->mp;
    error = 0;
    if ((mp->mnt_flag & (MNT_MLOCK|MNT_RDONLY|MNT_MPBUSY)) == 0 &&
	!vfs_busy(mp)) {
	asyncflag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &= ~MNT_ASYNC;
	error = VFS_SYNC(mp, wait ? MNT_WAIT : MNT_NOWAIT, p->p_ucred, p);
	if (asyncflag)
		mp->mnt_flag |= MNT_ASYNC;
	vfs_unbusy(mp);
    }
    prfree(p);
    if (error)
	    return errno_to_oskit_error(error);	

    return 0;
}
Code Example #5
static int
msdosfs_deget_dotdot(struct vnode *vp, u_long cluster, int blkoff,
    struct vnode **rvp)
{
	struct mount *mp;
	struct msdosfsmount *pmp;
	struct denode *rdp;
	int ltype, error;

	mp = vp->v_mount;
	pmp = VFSTOMSDOSFS(mp);
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("msdosfs_deget_dotdot: vp not locked"));

	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = deget(pmp, cluster, blkoff,  &rdp);
	vfs_unbusy(mp);
	if (error == 0)
		*rvp = DETOV(rdp);
	if (*rvp != vp)
		vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0) {
			if (*rvp == vp)
				vunref(*rvp);
			else
				vput(*rvp);
		}
		error = ENOENT;
	}
	return (error);
}
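Code Example #5 above (and Code Example #26 further down) uses the FreeBSD idiom for busying a mount while a vnode lock is already held: try the non-blocking vfs_busy(mp, MBF_NOWAIT) first and, if that fails because an unmount is in progress and may itself be waiting for the vnode lock, take a reference on the mount, drop the vnode lock, busy the mount with a blocking call, re-lock the vnode, and re-check that it was not doomed in the meantime. The following condensed sketch isolates just that ordering, using the FreeBSD calls that appear above; the helper name is hypothetical and error handling is trimmed to the essentials.

static int
busy_mount_with_vnode_locked(struct mount *mp, struct vnode *vp, int ltype)
{
	int error;

	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);			/* keep mp alive while unlocked */
		VOP_UNLOCK(vp, 0);		/* avoid deadlocking against unmount */
		error = vfs_busy(mp, 0);	/* now it is safe to sleep */
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {	/* vp was reclaimed while unlocked */
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	return (0);	/* caller is responsible for vfs_unbusy(mp) when done */
}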
Code Example #6
int
compat_20_netbsd32_getfsstat(struct lwp *l, const struct compat_20_netbsd32_getfsstat_args *uap, register_t *retval)
{
	/* {
		syscallarg(netbsd32_statfsp_t) buf;
		syscallarg(netbsd32_long) bufsize;
		syscallarg(int) flags;
	} */
	struct mount *mp, *nmp;
	struct statvfs *sp;
	struct netbsd32_statfs sb32;
	void *sfsp;
	long count, maxcount, error;

	maxcount = SCARG(uap, bufsize) / sizeof(struct netbsd32_statfs);
	sfsp = SCARG_P32(uap, buf);
	mutex_enter(&mountlist_lock);
	count = 0;
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		if (vfs_busy(mp, &nmp)) {
			continue;
		}
		if (sfsp && count < maxcount) {
			sp = &mp->mnt_stat;
			/*
			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
			 * refresh the fsstat cache. MNT_WAIT or MNT_LAZY
			 * overrides MNT_NOWAIT.
			 */
			if (SCARG(uap, flags) != MNT_NOWAIT &&
			    SCARG(uap, flags) != MNT_LAZY &&
			    (SCARG(uap, flags) == MNT_WAIT ||
			     SCARG(uap, flags) == 0) &&
			    (error = VFS_STATVFS(mp, sp)) != 0) {
				mutex_enter(&mountlist_lock);
				vfs_unbusy(mp, false, &nmp);
				continue;
			}
			sp->f_flag = mp->mnt_flag & MNT_VISFLAGMASK;
			compat_20_netbsd32_from_statvfs(sp, &sb32);
			error = copyout(&sb32, sfsp, sizeof(sb32));
			if (error) {
				vfs_unbusy(mp, false, NULL);
				return (error);
			}
			sfsp = (char *)sfsp + sizeof(sb32);
		}
		count++;
		mutex_enter(&mountlist_lock);
		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
	if (sfsp && count > maxcount)
		*retval = maxcount;
	else
		*retval = count;
	return (0);
}
Code Example #7
File: fuse_internal.c  Project: 95rangerxlt/macfuse
__private_extern__
int
fuse_internal_remove(vnode_t               dvp,
                     vnode_t               vp,
                     struct componentname *cnp,
                     enum fuse_opcode      op,
                     vfs_context_t         context)
{
    struct fuse_dispatcher fdi;

    struct vnode_attr *vap = VTOVA(vp);
    int need_invalidate = 0;
    uint64_t target_nlink = 0;
    mount_t mp = vnode_mount(vp);

    int err = 0;

    fdisp_init(&fdi, cnp->cn_namelen + 1);
    fdisp_make_vp(&fdi, op, dvp, context);

    memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
    ((char *)fdi.indata)[cnp->cn_namelen] = '\0';

    if ((vap->va_nlink > 1) && vnode_isreg(vp)) {
        need_invalidate = 1;
        target_nlink = vap->va_nlink;
    }

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    fuse_invalidate_attr(dvp);
    fuse_invalidate_attr(vp);

    /*
     * XXX: M_MACFUSE_INVALIDATE_CACHED_VATTRS_UPON_UNLINK
     *
     * Consider the case where vap->va_nlink > 1 for the entity being
     * removed. In our world, other in-memory vnodes that share a link
     * count each with this one may not know right way that this one just
     * got deleted. We should let them know, say, through a vnode_iterate()
     * here and a callback that does fuse_invalidate_attr(vp) on each
     * relevant vnode.
     */
    if (need_invalidate && !err) {
        if (!vfs_busy(mp, LK_NOWAIT)) {
            vnode_iterate(mp, 0, fuse_internal_remove_callback,
                          (void *)&target_nlink);
            vfs_unbusy(mp);
        } else {
            IOLog("MacFUSE: skipping link count fixup upon remove\n");
        }
    }

    return err;
}
Code Example #8
int
procfs_domounts(struct lwp *curl, struct proc *p,
    struct pfsnode *pfs, struct uio *uio)
{
	char *bf, *mtab = NULL;
	const char *fsname;
	size_t len, mtabsz = 0;
	struct mount *mp, *nmp;
	struct statvfs *sfs;
	int error = 0;

	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);
	mutex_enter(&mountlist_lock);
	for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
	     mp = nmp) {
		if (vfs_busy(mp, &nmp)) {
			continue;
		}

		sfs = &mp->mnt_stat;

		/* Linux uses different names for some filesystems */
		fsname = sfs->f_fstypename;
		if (strcmp(fsname, "procfs") == 0)
			fsname = "proc";
		else if (strcmp(fsname, "ext2fs") == 0)
			fsname = "ext2";

		len = snprintf(bf, LBFSZ, "%s %s %s %s%s%s%s%s%s 0 0\n",
			sfs->f_mntfromname,
			sfs->f_mntonname,
			fsname,
			(mp->mnt_flag & MNT_RDONLY) ? "ro" : "rw",
			(mp->mnt_flag & MNT_NOSUID) ? ",nosuid" : "",
			(mp->mnt_flag & MNT_NOEXEC) ? ",noexec" : "",
			(mp->mnt_flag & MNT_NODEV) ? ",nodev" : "",
			(mp->mnt_flag & MNT_SYNCHRONOUS) ? ",sync" : "",
			(mp->mnt_flag & MNT_NOATIME) ? ",noatime" : ""
			);

		mtab = realloc(mtab, mtabsz + len, M_TEMP, M_WAITOK);
		memcpy(mtab + mtabsz, bf, len);
		mtabsz += len;

		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
	free(bf, M_TEMP);

	if (mtabsz > 0) {
		error = uiomove_frombuf(mtab, mtabsz, uio);
		free(mtab, M_TEMP);
	}

	return error;
}
Code Example #9
File: opensolaris_lookup.c  Project: 151706061/osv
int
traverse(vnode_t **cvpp, int lktype)
{
	vnode_t *cvp;
	vnode_t *tvp;
	vfs_t *vfsp;
	int error;

	cvp = *cvpp;
	tvp = NULL;

	/*
	 * If this vnode is mounted on, then we transparently indirect
	 * to the vnode which is the root of the mounted file system.
	 * Before we do this we must check that an unmount is not in
	 * progress on this vnode.
	 */

	for (;;) {
		/*
		 * Reached the end of the mount chain?
		 */
		vfsp = vn_mountedvfs(cvp);
		if (vfsp == NULL)
			break;
		error = vfs_busy(vfsp, 0);
		/*
		 * tvp is NULL for *cvpp vnode, which we can't unlock.
		 */
		if (tvp != NULL)
			vput(cvp);
		else
			vrele(cvp);
		if (error)
			return (error);

		/*
		 * The read lock must be held across the call to VFS_ROOT() to
		 * prevent a concurrent unmount from destroying the vfs.
		 */
		error = VFS_ROOT(vfsp, lktype, &tvp);
		vfs_unbusy(vfsp);
		if (error != 0)
			return (error);
		cvp = tvp;
	}

	*cvpp = cvp;
	return (0);
}
Code Example #10
File: ufs_vfsops.c  Project: AgamAgarwal/minix
/*
 * Do operations associated with quotas
 */
int
ufs_quotactl(struct mount *mp, prop_dictionary_t dict)
{
	struct lwp *l = curlwp;

#if !defined(QUOTA) && !defined(QUOTA2)
	(void) mp;
	(void) dict;
	(void) l;
	return (EOPNOTSUPP);
#else
	int  error;
	prop_dictionary_t cmddict;
	prop_array_t commands;
	prop_object_iterator_t iter;

	/* Mark the mount busy, as we're passing it to kauth(9). */
	error = vfs_busy(mp, NULL);
	if (error)
		return (error);

	error = quota_get_cmds(dict, &commands);
	if (error)
		goto out_vfs;
	iter = prop_array_iterator(commands);
	if (iter == NULL) {
		error = ENOMEM;
		goto out_vfs;
	}
		
		
	mutex_enter(&mp->mnt_updating);
	while ((cmddict = prop_object_iterator_next(iter)) != NULL) {
		if (prop_object_type(cmddict) != PROP_TYPE_DICTIONARY)
			continue;
		error = quota_handle_cmd(mp, l, cmddict);
		if (error)
			break;
	}
	prop_object_iterator_release(iter);
	mutex_exit(&mp->mnt_updating);
out_vfs:
	vfs_unbusy(mp, false, NULL);
	return (error);
#endif
}
Code Example #11
int
procfs_domounts(struct lwp *curl, struct proc *p,
    struct pfsnode *pfs, struct uio *uio)
{
	char *bf, *mtab = NULL;
	size_t mtabsz = 0;
	struct mount *mp, *nmp;
	int error = 0, root = 0;
	struct cwdinfo *cwdi = curl->l_proc->p_cwdi;

	bf = malloc(LBFSZ, M_TEMP, M_WAITOK);

	mutex_enter(&mountlist_lock);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		struct statvfs sfs;

		if (vfs_busy(mp, &nmp))
			continue;

		if ((error = dostatvfs(mp, &sfs, curl, MNT_WAIT, 0)) == 0)
			root |= procfs_format_sfs(&mtab, &mtabsz, bf, LBFSZ,
			    &sfs, curl, 0);

		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);

	/*
	 * If we are inside a chroot that is not itself a mount point,
	 * fake a root entry.
	 */
	if (!root && cwdi->cwdi_rdir)
		(void)procfs_format_sfs(&mtab, &mtabsz, bf, LBFSZ,
		    &cwdi->cwdi_rdir->v_mount->mnt_stat, curl, 1);

	free(bf, M_TEMP);

	if (mtabsz > 0) {
		error = uiomove_frombuf(mtab, mtabsz, uio);
		free(mtab, M_TEMP);
	}

	return error;
}
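The three mount-list walks above (Code Examples #6, #8 and #11) share one loop shape: hold mountlist_lock to pick up the next entry, use vfs_busy(mp, &nmp) both to pin the current mount and to record its successor, and pass &nmp to vfs_unbusy() so the successor stays usable even if the list changed while the mount was busy. A stripped-down sketch of that loop follows, modeled on the calls in Code Example #11; process_mount() is a hypothetical per-mount callback, and the exact mountlist_lock hand-off inside vfs_busy()/vfs_unbusy() is assumed to match that example.

static void
for_each_mount(void (*process_mount)(struct mount *))
{
	struct mount *mp, *nmp;

	mutex_enter(&mountlist_lock);
	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
		/* Skip a mount that is being unmounted; nmp is still set. */
		if (vfs_busy(mp, &nmp))
			continue;

		(*process_mount)(mp);		/* hypothetical per-mount work */

		/* Drop the busy reference; &nmp keeps the walk consistent. */
		vfs_unbusy(mp, false, &nmp);
	}
	mutex_exit(&mountlist_lock);
}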
Code Example #12
File: vfs_sync.c  Project: Gwenio/DragonFlyBSD
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whos VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}
Code Example #13
File: mfs_vfsops.c  Project: ajinkya93/OpenBSD
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int sleepreturn = 0;

	while (1) {
		while (1) {
			if (mfsp->mfs_shutdown == 1)
				break;
			bp = bufq_dequeue(&mfsp->mfs_bufq);
			if (bp == NULL)
				break;
			mfs_doio(mfsp, bp);
			wakeup(bp);
		}
		if (mfsp->mfs_shutdown == 1)
			break;

		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp,
			    (CURSIG(p) == SIGKILL) ? MNT_FORCE : 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
Code Example #14
File: vfs_nlookup.c  Project: kusumi/DragonFlyBSD
/*
 * Resolve a mount point's glue ncp.  This ncp creates the illusion
 * of continuity in the namecache tree by connecting the ncp related to the
 * vnode under the mount to the ncp related to the mount's root vnode.
 *
 * If no error occurred, a locked, ref'd ncp is stored in *ncpp.
 */
int
nlookup_mp(struct mount *mp, struct nchandle *nch)
{
    struct vnode *vp;
    int error;

    error = 0;
    cache_get(&mp->mnt_ncmountpt, nch);
    if (nch->ncp->nc_flag & NCF_UNRESOLVED) {
	while (vfs_busy(mp, 0))
	    ;
	error = VFS_ROOT(mp, &vp);
	vfs_unbusy(mp);
	if (error) {
	    cache_put(nch);
	} else {
	    cache_setvp(nch, vp);
	    vput(vp);
	}
    }
    return(error);
}
Code Example #15
/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}
Code Example #16
File: ufs_quota.c  Project: jaredmcneill/freebsd
/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
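    /*
     * Note: mp appears to arrive already vfs_busy()'d by the caller; every
     * exit path below drops that busy reference, which is also why the
     * function unbusies the mount around vn_open() and re-busies it with
     * MBF_NOWAIT afterwards.
     */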
    struct ufsmount *ump;
    struct vnode *vp, **vpp;
    struct vnode *mvp;
    struct dquot *dq;
    int error, flags;
    struct nameidata nd;

    error = priv_check(td, PRIV_UFS_QUOTAON);
    if (error != 0) {
        vfs_unbusy(mp);
        return (error);
    }

    if ((mp->mnt_flag & MNT_RDONLY) != 0) {
        vfs_unbusy(mp);
        return (EROFS);
    }

    ump = VFSTOUFS(mp);
    dq = NODQUOT;

    NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
    flags = FREAD | FWRITE;
    vfs_ref(mp);
    vfs_unbusy(mp);
    error = vn_open(&nd, &flags, 0, NULL);
    if (error != 0) {
        vfs_rel(mp);
        return (error);
    }
    NDFREE(&nd, NDF_ONLY_PNBUF);
    vp = nd.ni_vp;
    error = vfs_busy(mp, MBF_NOWAIT);
    vfs_rel(mp);
    if (error == 0) {
        if (vp->v_type != VREG) {
            error = EACCES;
            vfs_unbusy(mp);
        }
    }
    if (error != 0) {
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        return (error);
    }

    UFS_LOCK(ump);
    if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
        UFS_UNLOCK(ump);
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (EALREADY);
    }
    ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
    UFS_UNLOCK(ump);
    if ((error = dqopen(vp, ump, type)) != 0) {
        VOP_UNLOCK(vp, 0);
        UFS_LOCK(ump);
        ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
        UFS_UNLOCK(ump);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (error);
    }
    VOP_UNLOCK(vp, 0);
    MNT_ILOCK(mp);
    mp->mnt_flag |= MNT_QUOTA;
    MNT_IUNLOCK(mp);

    vpp = &ump->um_quotas[type];
    if (*vpp != vp)
        quotaoff1(td, mp, type);

    /*
     * When the directory vnode containing the quota file is
     * inactivated, due to the shared lookup of the quota file
     * vput()ing the dvp, the qsyncvp() call for the containing
     * directory would try to acquire the quota lock exclusive.
     * At the same time, lookup already locked the quota vnode
     * shared.  Mark the quota vnode lock as allowing recursion
     * and automatically converting shared locks to exclusive.
     *
     * Also mark quota vnode as system.
     */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    vp->v_vflag |= VV_SYSTEM;
    VN_LOCK_AREC(vp);
    VN_LOCK_DSHARE(vp);
    VOP_UNLOCK(vp, 0);
    *vpp = vp;
    /*
     * Save the credential of the process that turned on quotas.
     * Set up the time limits for this quota.
     */
    ump->um_cred[type] = crhold(td->td_ucred);
    ump->um_btime[type] = MAX_DQ_TIME;
    ump->um_itime[type] = MAX_IQ_TIME;
    if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
        if (dq->dq_btime > 0)
            ump->um_btime[type] = dq->dq_btime;
        if (dq->dq_itime > 0)
            ump->um_itime[type] = dq->dq_itime;
        dqrele(NULLVP, dq);
    }
    /*
     * Allow the getdq from getinoquota below to read the quota
     * from file.
     */
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_CLOSING;
    UFS_UNLOCK(ump);
    /*
     * Search vnodes associated with this mount point,
     * adding references to quota file being opened.
     * NB: only need to add dquot's for inodes being modified.
     */
again:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
        if (vp->v_type == VNON || vp->v_writecount == 0) {
            VOP_UNLOCK(vp, 0);
            vrele(vp);
            continue;
        }
        error = getinoquota(VTOI(vp));
        VOP_UNLOCK(vp, 0);
        vrele(vp);
        if (error) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            break;
        }
    }

    if (error)
        quotaoff_inchange(td, mp, type);
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_OPENING;
    KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
            ("quotaon: leaking flags"));
    UFS_UNLOCK(ump);

    vfs_unbusy(mp);
    return (error);
}
Code Example #17
int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOULFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				if (VTOI(vp)->i_lfs_iflags & LFSI_BMAP) {
					mutex_enter(vp->v_interlock);
					if (vget(vp, LK_NOWAIT) == 0) {
						if (! vrecycle(vp))
							vrele(vp);
					}
				}
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			mutex_enter(&ulfs_ihash_lock);
			vp = ulfs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_iflag & VI_XLOCK)) {
				ip = VTOI(vp);
				mutex_enter(vp->v_interlock);
				mutex_exit(&ulfs_ihash_lock);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				numrefed++;
			} else {
				mutex_exit(&ulfs_ihash_lock);
				/*
				 * Don't VFS_VGET if we're being unmounted,
				 * since we hold vfs_busy().
				 */
				if (mntp->mnt_iflag & IMNT_UNMOUNT) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
					DLOG((DLOG_CLEAN, "lfs_bmapv: vget ino"
					      "%d failed with %d",
					      blkp->bi_inode,error));
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				} else {
					KASSERT(VOP_ISLOCKED(vp));
					VTOI(vp)->i_lfs_iflags |= LFSI_BMAP;
					VOP_UNLOCK(vp);
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			/* XXX ondisk32 */
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		/* Recycle as above. */
		if (ip->i_lfs_iflags & LFSI_BMAP) {
			mutex_enter(vp->v_interlock);
			if (vget(vp, LK_NOWAIT) == 0) {
				if (! vrecycle(vp))
					vrele(vp);
			}
		}
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}
Code Example #18
int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
    int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp = NULL;
	ino_t lastino;
	daddr_t b_daddr, v_daddr;
	int cnt, error;
	int do_again = 0;
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	/* number of blocks/inodes that we have already bwrite'ed */
	int nblkwritten, ninowritten;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
		      fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	/*
	 * This seglock exists so that, even though we might have to sleep,
	 * our blocks cannot become invalid while we do.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	nblkwritten = ninowritten = 0;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto err3;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR)
				continue;

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					   &vp,
					   (blkp->bi_lbn == LFS_UNUSED_LBN
					    ? blkp->bi_bp
					    : NULL));

			if (!error) {
				numrefed++;
			}
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
				      " failed with %d (ino %d, segment %d)\n",
				      error, blkp->bi_inode,
				      lfs_dtosn(fs, blkp->bi_daddr)));
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.	(When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.) But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
			ninowritten++;
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * locked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* Can't clean VU_DIROP directories in case of truncation */
		/* XXX - maybe we should mark removed dirs specially? */
		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
			do_again++;
			continue;
		}

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_CLEANING);
					mutex_exit(&lfs_lock);
				}
				brelse(bp, 0);
			}
			continue;
		}

		b_daddr = 0;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
		{
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
			    lfs_dtosn(fs, blkp->bi_daddr))
			{
				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
				      (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
			}
			do_again++;
			continue;
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = fs->lfs_bsize;
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
			      " size (%ld != %d), try again\n",
			      blkp->bi_inode, (long long)blkp->bi_lbn,
			      (long) obsize, blkp->bi_size));
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.	 If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
		} else {
			/* Indirect block or ifile */
			if (blkp->bi_size != fs->lfs_bsize &&
			    ip->i_number != LFS_IFILE_INUM)
				panic("lfs_markv: partial indirect block?"
				    " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.	So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;

		nblkwritten++;
		/*
		 * XXX should account indirect blocks and ifile pages as well
		 */
		if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
		    > LFS_MARKV_MAX_BLOCKS) {
			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
			      nblkwritten, ninowritten));
			lfs_segwrite(mntp, SEGM_CLEAN);
			nblkwritten = ninowritten = 0;
		}
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif
	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
	      nblkwritten, ninowritten));

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp, false, NULL);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

err2:
	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));

	/*
	 * XXX we're here because copyin() failed.
	 * XXX it means that we can't trust the cleanerd.  too bad.
	 * XXX how can we recover from this?
	 */

err3:
	/*
	 * XXX should do segwrite here anyway?
	 */

	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		--numrefed;
	}

	lfs_segunlock(fs);
	vfs_unbusy(mntp, false, NULL);
#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif

	return (error);
}
Code Example #19
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}

		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);
	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}
	return (sleepreturn);
}
Code Example #20
File: lfs_syscalls.c  Project: ycui1984/netbsd-src
int
lfs_bmapv(struct lwp *l, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	ump = VFSTOULFS(mntp);
	fs = ump->um_lfs;

	if (fs->lfs_cleaner_thread == NULL)
		fs->lfs_cleaner_thread = curlwp;
	KASSERT(fs->lfs_cleaner_thread == curlwp);

	cnt = blkcnt;

	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				vput(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = lfs_sb_getidaddr(fs);
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = lfs_if_getdaddr(fs, ifp);
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
			    LK_SHARED, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget ino"
				      "%d failed with %d",
				      blkp->bi_inode,error));
				KASSERT(vp == NULL);
				continue;
			} else {
				KASSERT(VOP_ISLOCKED(vp));
				numrefed++;
			}
			ip = VTOI(vp);
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = lfs_sb_getbsize(fs);
		}
	}

	/*
	 * Finish the old file, if there was one.
	 */
	if (vp != NULL) {
		vput(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}
Code Example #21
/*
 * Convert a vnode to its component name
 */
static int
pfs_vptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct pfs_vdata *pvd = vp->v_data;
	struct pfs_node *pd = pvd->pvd_pn;
	struct pfs_node *pn;
	struct mount *mp;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char pidbuf[PFS_NAMELEN];
	pid_t pid = pvd->pvd_pid;
	int len, i, error, locked;

	i = *buflen;
	error = 0;

	pfs_lock(pd);

	if (vp->v_type == VDIR && pd->pn_type == pfstype_root) {
		*dvp = vp;
		vhold(*dvp);
		pfs_unlock(pd);
		PFS_RETURN (0);
	} else if (vp->v_type == VDIR && pd->pn_type == pfstype_procdir) {
		len = snprintf(pidbuf, sizeof(pidbuf), "%d", pid);
		i -= len;
		if (i < 0) {
			error = ENOMEM;
			goto failed;
		}
		bcopy(pidbuf, buf + i, len);
	} else {
		len = strlen(pd->pn_name);
		i -= len;
		if (i < 0) {
			error = ENOMEM;
			goto failed;
		}
		bcopy(pd->pn_name, buf + i, len);
	}

	pn = pd->pn_parent;
	pfs_unlock(pd);

	mp = vp->v_mount;
	error = vfs_busy(mp, 0);
	if (error)
		return (error);

	/*
	 * vp is held by caller.
	 */
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);

	error = pfs_vncache_alloc(mp, dvp, pn, pid);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		vfs_unbusy(mp);
		PFS_RETURN(error);
	}

	*buflen = i;
	vhold(*dvp);
	vput(*dvp);
	vn_lock(vp, locked | LK_RETRY);
	vfs_unbusy(mp);

	PFS_RETURN (0);
failed:
	pfs_unlock(pd);
	PFS_RETURN(error);
}
Code Example #22
File: ntfs_vfsops.c  Project: UnitedMarsupials/kame
static int
ntfs_mount ( 
	struct mount *mp,
	char *path,
	caddr_t data,
	struct nameidata *ndp,
	struct proc *p )
{
	u_int		size;
	int		err = 0;
	struct vnode	*devvp;
	struct ntfs_args args;

	/*
	 * Use NULL path to flag a root mount
	 */
	if( path == NULL) {
		/*
		 ***
		 * Mounting root file system
		 ***
		 */
	
		/* Get vnode for root device*/
		if( bdevvp( rootdev, &rootvp))
			panic("ffs_mountroot: can't setup bdevvp for root");

		/*
		 * FS specific handling
		 */
		mp->mnt_flag |= MNT_RDONLY;	/* XXX globally applicable?*/

		/*
		 * Attempt mount
		 */
		if( ( err = ntfs_mountfs(rootvp, mp, &args, p)) != 0) {
			/* fs specific cleanup (if any)*/
			goto error_1;
		}

		goto dostatfs;		/* success*/

	}

	/*
	 ***
	 * Mounting non-root file system or updating a file system
	 ***
	 */

	/* copy in user arguments*/
	err = copyin(data, (caddr_t)&args, sizeof (struct ntfs_args));
	if (err)
		goto error_1;		/* can't get arguments*/

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		printf("ntfs_mount(): MNT_UPDATE not supported\n");
		err = EINVAL;
		goto error_1;

#if 0
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		err = 0;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp)) {
				err = EBUSY;
				goto error_1;
			}
			err = ffs_flushfiles(mp, flags, p);
			vfs_unbusy(mp);
		}
		if (!err && (mp->mnt_flag & MNT_RELOAD))
			err = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
		if (err) {
			goto error_1;
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			if (!fs->fs_clean) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf("WARNING: %s was not properly dismounted.\n",fs->fs_fsmnt);
				} else {
					printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck.\n",
					    fs->fs_fsmnt);
					err = EPERM;
					goto error_1;
				}
			}
			fs->fs_ronly = 0;
		}
		if (fs->fs_ronly == 0) {
			fs->fs_clean = 0;
			ffs_sbupdate(ump, MNT_WAIT);
		}
		/* if not updating name...*/
		if (args.fspec == 0) {
			/*
			 * Process export requests.  Jumping to "success"
			 * will return the vfs_export() error code.
			 */
			err = vfs_export(mp, &ump->um_export, &args.export);
			goto success;
		}
Code Example #23
File: tmpfs_vnops.c  Project: hmatyschok/MeshBSD
static int
tmpfs_rename(struct vop_rename_args *v)
{
	struct vnode *fdvp = v->a_fdvp;
	struct vnode *fvp = v->a_fvp;
	struct componentname *fcnp = v->a_fcnp;
	struct vnode *tdvp = v->a_tdvp;
	struct vnode *tvp = v->a_tvp;
	struct componentname *tcnp = v->a_tcnp;
	struct mount *mp = NULL;

	char *newname;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *fdnode;
	struct tmpfs_node *fnode;
	struct tmpfs_node *tnode;
	struct tmpfs_node *tdnode;

	MPASS(VOP_ISLOCKED(tdvp));
	MPASS(IMPLIES(tvp != NULL, VOP_ISLOCKED(tvp)));
	MPASS(fcnp->cn_flags & HASBUF);
	MPASS(tcnp->cn_flags & HASBUF);

	/* Disallow cross-device renames.
	 * XXX Why isn't this done by the caller? */
	if (fvp->v_mount != tdvp->v_mount ||
	    (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
		error = EXDEV;
		goto out;
	}

	/* If source and target are the same file, there is nothing to do. */
	if (fvp == tvp) {
		error = 0;
		goto out;
	}

	/* If we need to move the directory between entries, lock the
	 * source so that we can safely operate on it. */
	if (fdvp != tdvp && fdvp != tvp) {
		if (vn_lock(fdvp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
			mp = tdvp->v_mount;
			error = vfs_busy(mp, 0);
			if (error != 0) {
				mp = NULL;
				goto out;
			}
			error = tmpfs_rename_relock(fdvp, &fvp, tdvp, &tvp,
			    fcnp, tcnp);
			if (error != 0) {
				vfs_unbusy(mp);
				return (error);
			}
			ASSERT_VOP_ELOCKED(fdvp,
			    "tmpfs_rename: fdvp not locked");
			ASSERT_VOP_ELOCKED(tdvp,
			    "tmpfs_rename: tdvp not locked");
			if (tvp != NULL)
				ASSERT_VOP_ELOCKED(tvp,
				    "tmpfs_rename: tvp not locked");
			if (fvp == tvp) {
				error = 0;
				goto out_locked;
			}
		}
	}

	tmp = VFS_TO_TMPFS(tdvp->v_mount);
	tdnode = VP_TO_TMPFS_DIR(tdvp);
	tnode = (tvp == NULL) ? NULL : VP_TO_TMPFS_NODE(tvp);
	fdnode = VP_TO_TMPFS_DIR(fdvp);
	fnode = VP_TO_TMPFS_NODE(fvp);
	de = tmpfs_dir_lookup(fdnode, fnode, fcnp);

	/* Entry can disappear before we lock fdvp,
	 * also avoid manipulating '.' and '..' entries. */
	if (de == NULL) {
		if ((fcnp->cn_flags & ISDOTDOT) != 0 ||
		    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.'))
			error = EINVAL;
		else
			error = ENOENT;
		goto out_locked;
	}
	MPASS(de->td_node == fnode);

	/* If re-naming a directory to another preexisting directory
	 * ensure that the target directory is empty so that its
	 * removal causes no side effects.
	 * Kern_rename guarantees the destination to be a directory
	 * if the source is one. */
	if (tvp != NULL) {
		MPASS(tnode != NULL);

		if ((tnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND)) ||
		    (tdnode->tn_flags & (APPEND | IMMUTABLE))) {
			error = EPERM;
			goto out_locked;
		}

		if (fnode->tn_type == VDIR && tnode->tn_type == VDIR) {
			if (tnode->tn_size > 0) {
				error = ENOTEMPTY;
				goto out_locked;
			}
		} else if (fnode->tn_type == VDIR && tnode->tn_type != VDIR) {
			error = ENOTDIR;
			goto out_locked;
		} else if (fnode->tn_type != VDIR && tnode->tn_type == VDIR) {
			error = EISDIR;
			goto out_locked;
		} else {
			MPASS(fnode->tn_type != VDIR &&
				tnode->tn_type != VDIR);
		}
	}

	if ((fnode->tn_flags & (NOUNLINK | IMMUTABLE | APPEND))
	    || (fdnode->tn_flags & (APPEND | IMMUTABLE))) {
		error = EPERM;
		goto out_locked;
	}

	/* Ensure that we have enough memory to hold the new name, if it
	 * has to be changed. */
	if (fcnp->cn_namelen != tcnp->cn_namelen ||
	    bcmp(fcnp->cn_nameptr, tcnp->cn_nameptr, fcnp->cn_namelen) != 0) {
		newname = malloc(tcnp->cn_namelen, M_TMPFSNAME, M_WAITOK);
	} else
		newname = NULL;

	/* If the node is being moved to another directory, we have to do
	 * the move. */
	if (fdnode != tdnode) {
		/* In case we are moving a directory, we have to adjust its
		 * parent to point to the new parent. */
		if (de->td_node->tn_type == VDIR) {
			struct tmpfs_node *n;

			/* Ensure the target directory is not a child of the
			 * directory being moved.  Otherwise, we'd end up
			 * with stale nodes. */
			n = tdnode;
			/* TMPFS_LOCK guarantees that no nodes are freed while
			 * traversing the list. Nodes can only be marked as
			 * removed: tn_parent == NULL. */
			TMPFS_LOCK(tmp);
			TMPFS_NODE_LOCK(n);
			while (n != n->tn_dir.tn_parent) {
				struct tmpfs_node *parent;

				if (n == fnode) {
					TMPFS_NODE_UNLOCK(n);
					TMPFS_UNLOCK(tmp);
					error = EINVAL;
					if (newname != NULL)
						    free(newname, M_TMPFSNAME);
					goto out_locked;
				}
				parent = n->tn_dir.tn_parent;
				TMPFS_NODE_UNLOCK(n);
				if (parent == NULL) {
					n = NULL;
					break;
				}
				TMPFS_NODE_LOCK(parent);
				if (parent->tn_dir.tn_parent == NULL) {
					TMPFS_NODE_UNLOCK(parent);
					n = NULL;
					break;
				}
				n = parent;
			}
			TMPFS_UNLOCK(tmp);
			if (n == NULL) {
				error = EINVAL;
				if (newname != NULL)
					    free(newname, M_TMPFSNAME);
				goto out_locked;
			}
			TMPFS_NODE_UNLOCK(n);

			/* Adjust the parent pointer. */
			TMPFS_VALIDATE_DIR(fnode);
			TMPFS_NODE_LOCK(de->td_node);
			de->td_node->tn_dir.tn_parent = tdnode;
			TMPFS_NODE_UNLOCK(de->td_node);

			/* As a result of changing the target of the '..'
			 * entry, the link count of the source and target
			 * directories has to be adjusted. */
			TMPFS_NODE_LOCK(tdnode);
			TMPFS_ASSERT_LOCKED(tdnode);
			tdnode->tn_links++;
			TMPFS_NODE_UNLOCK(tdnode);

			TMPFS_NODE_LOCK(fdnode);
			TMPFS_ASSERT_LOCKED(fdnode);
			fdnode->tn_links--;
			TMPFS_NODE_UNLOCK(fdnode);
		}
	}

	/* Do the move: just remove the entry from the source directory
	 * and insert it into the target one. */
	tmpfs_dir_detach(fdvp, de);

	if (fcnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(fdvp, fcnp);
	if (tcnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(tdvp, tcnp);

	/* If the name has changed, we need to make it effective by changing
	 * it in the directory entry. */
	if (newname != NULL) {
		MPASS(tcnp->cn_namelen <= MAXNAMLEN);

		free(de->ud.td_name, M_TMPFSNAME);
		de->ud.td_name = newname;
		tmpfs_dirent_init(de, tcnp->cn_nameptr, tcnp->cn_namelen);

		fnode->tn_status |= TMPFS_NODE_CHANGED;
		tdnode->tn_status |= TMPFS_NODE_MODIFIED;
	}

	/* If we are overwriting an entry, we have to remove the old one
	 * from the target directory. */
	if (tvp != NULL) {
		struct tmpfs_dirent *tde;

		/* Remove the old entry from the target directory. */
		tde = tmpfs_dir_lookup(tdnode, tnode, tcnp);
		tmpfs_dir_detach(tdvp, tde);

		/* Free the directory entry we just deleted.  Note that the
		 * node referred by it will not be removed until the vnode is
		 * really reclaimed. */
		tmpfs_free_dirent(VFS_TO_TMPFS(tvp->v_mount), tde);
	}

	tmpfs_dir_attach(tdvp, de);

	cache_purge(fvp);
	if (tvp != NULL)
		cache_purge(tvp);
	cache_purge_negative(tdvp);

	error = 0;

out_locked:
	if (fdvp != tdvp && fdvp != tvp)
		VOP_UNLOCK(fdvp, 0);

out:
	/* Release target nodes. */
	/* XXX: I don't understand when tdvp can be the same as tvp, but
	 * other code takes care of this... */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp != NULL)
		vput(tvp);

	/* Release source nodes. */
	vrele(fdvp);
	vrele(fvp);

	if (mp != NULL)
		vfs_unbusy(mp);

	return error;
}
Code Example #24
File: ext2_vfsops.c  Project: Gwenio/DragonFlyBSD
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Parameters:
 *	data:	this is actually a (struct ext2_args *)
 */
static int
ext2_mount(struct mount *mp, char *path, caddr_t data, struct ucred *cred)
{
	struct vnode *devvp;
	struct ext2_args args;
	struct ext2mount *ump = NULL;
	struct ext2_sb_info *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;
	struct nlookupdata nd;

	if ((error = copyin(data, (caddr_t)&args, sizeof (struct ext2_args))) != 0)
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOEXT2(mp);
		fs = ump->um_e2fs;
		devvp = ump->um_devvp;
		error = 0;
		if (fs->s_rd_only == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp, LK_NOWAIT))
				return (EBUSY);
			error = ext2_flushfiles(mp, flags);
			vfs_unbusy(mp);
			if (!error && fs->s_wasvalid) {
				fs->s_es->s_state |= EXT2_VALID_FS;
				ext2_sbupdate(ump, MNT_WAIT);
			}
			fs->s_rd_only = 1;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			VOP_OPEN(devvp, FREAD, FSCRED, NULL);
			VOP_CLOSE(devvp, FREAD|FWRITE);
			vn_unlock(devvp);
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ext2_reload(mp, cred);
		if (error)
			return (error);
		if (ext2_check_sb_compat(fs->s_es, devvp->v_rdev,
		    (mp->mnt_kern_flag & MNTK_WANTRDWR) == 0) != 0)
			return (EPERM);
		if (fs->s_rd_only && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (cred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_EACCESS(devvp, VREAD | VWRITE, cred);
				if (error) {
					vn_unlock(devvp);
					return (error);
				}
				vn_unlock(devvp);
			}

			if ((fs->s_es->s_state & EXT2_VALID_FS) == 0 ||
			    (fs->s_es->s_state & EXT2_ERROR_FS)) {
				if (mp->mnt_flag & MNT_FORCE) {
					kprintf(
"WARNING: %s was not properly dismounted\n",
					    fs->fs_fsmnt);
				} else {
					kprintf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					return (EPERM);
				}
			}
			fs->s_es->s_state &= ~EXT2_VALID_FS;
			ext2_sbupdate(ump, MNT_WAIT);
			fs->s_rd_only = 0;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			VOP_OPEN(devvp, FREAD|FWRITE, FSCRED, NULL);
			VOP_CLOSE(devvp, FREAD);
			vn_unlock(devvp);
		}
		if (args.fspec == NULL) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
Code Example #25
File: ext2_vfsops.c  Project: Gwenio/DragonFlyBSD
/*
 * Do operations associated with quotas
 */
int
ext2_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t arg,
	     struct ucred *cred)
{
#ifndef QUOTA
	return (EOPNOTSUPP);
#else
	int cmd, type, error;

	type = cmds & SUBCMDMASK;
	cmd = cmds >> SUBCMDSHIFT;

	if (uid == -1) {
		switch(type) {
			case USRQUOTA:
				uid = cred->cr_ruid;
				break;
			case GRPQUOTA:
				uid = cred->cr_rgid;
				break;
			default:
				return (EINVAL);
		}
	}

	/*
	 * Check permissions.
	 */
	switch (cmd) {

	case Q_QUOTAON:
		error = priv_check_cred(cred, PRIV_UFS_QUOTAON, 0);
		break;

	case Q_QUOTAOFF:
		error = priv_check_cred(cred, PRIV_UFS_QUOTAOFF, 0);
		break;

	case Q_SETQUOTA:
		error = priv_check_cred(cred, PRIV_VFS_SETQUOTA, 0);
		break;

	case Q_SETUSE:
		error = priv_check_cred(cred, PRIV_UFS_SETUSE, 0);
		break;

	case Q_GETQUOTA:
		if (uid == cred->cr_ruid)
			error = 0;
		else
			error = priv_check_cred(cred, PRIV_VFS_GETQUOTA, 0);
		break;

	case Q_SYNC:
		error = 0;
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (error);


	if ((uint)type >= MAXQUOTAS)
		return (EINVAL);
	if (vfs_busy(mp, LK_NOWAIT))
		return (0);

	switch (cmd) {

	case Q_QUOTAON:
		error = ext2_quotaon(cred, mp, type, arg);
		break;

	case Q_QUOTAOFF:
		error = ext2_quotaoff(mp, type);
		break;

	case Q_SETQUOTA:
		error = ext2_setquota(mp, uid, type, arg);
		break;

	case Q_SETUSE:
		error = ext2_setuse(mp, uid, type, arg);
		break;

	case Q_GETQUOTA:
		error = ext2_getquota(mp, uid, type, arg);
		break;

	case Q_SYNC:
		error = ext2_qsync(mp);
		break;

	default:
		error = EINVAL;
		break;
	}
	vfs_unbusy(mp);
	return (error);
#endif
}
Code Example #26
File: kern_shutdown.c  Project: bhimanshu1997/freebsd
/*
 * The system call that results in changing the rootfs.
 */
static int
kern_reroot(void)
{
	struct vnode *oldrootvnode, *vp;
	struct mount *mp, *devmp;
	int error;

	if (curproc != initproc)
		return (EPERM);

	/*
	 * Mark the filesystem containing currently-running executable
	 * (the temporary copy of init(8)) busy.
	 */
	vp = curproc->p_textvp;
	error = vn_lock(vp, LK_SHARED);
	if (error != 0)
		return (error);
	mp = vp->v_mount;
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		vfs_rel(mp);
		if (error != 0) {
			VOP_UNLOCK(vp, 0);
			return (ENOENT);
		}
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);

	/*
	 * Remove the filesystem containing currently-running executable
	 * from the mount list, to prevent it from being unmounted
	 * by vfs_unmountall(), and to avoid confusing vfs_mountroot().
	 *
	 * Also preserve /dev - forcibly unmounting it could cause driver
	 * reinitialization.
	 */

	vfs_ref(rootdevmp);
	devmp = rootdevmp;
	rootdevmp = NULL;

	mtx_lock(&mountlist_mtx);
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	TAILQ_REMOVE(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);

	oldrootvnode = rootvnode;

	/*
	 * Unmount everything except for the two filesystems preserved above.
	 */
	vfs_unmountall();

	/*
	 * Add /dev back; vfs_mountroot() will move it into its new place.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_HEAD(&mountlist, devmp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	rootdevmp = devmp;
	vfs_rel(rootdevmp);

	/*
	 * Mount the new rootfs.
	 */
	vfs_mountroot();

	/*
	 * Update all references to the old rootvnode.
	 */
	mountcheckdirs(oldrootvnode, rootvnode);

	/*
	 * Add the temporary filesystem back and unbusy it.
	 */
	mtx_lock(&mountlist_mtx);
	TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mtx_unlock(&mountlist_mtx);
	vfs_unbusy(mp);

	return (0);
}
Code Example #27
File: vfs_lookup.c  Project: Algozjb/xnu
/*
 * Search a pathname.
 * This is a very central and rather complicated routine.
 *
 * The pathname is pointed to by ni_ptr and is of length ni_pathlen.
 * The starting directory is taken from ni_startdir. The pathname is
 * descended until done, or a symbolic link is encountered. The variable
 * ni_more is clear if the path is completed; it is set to one if a
 * symbolic link needing interpretation is encountered.
 *
 * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on
 * whether the name is to be looked up, created, renamed, or deleted.
 * When CREATE, RENAME, or DELETE is specified, information usable in
 * creating, renaming, or deleting a directory entry may be calculated.
 * If flag has LOCKPARENT or'ed into it, the parent directory is returned
 * locked. If flag has WANTPARENT or'ed into it, the parent directory is
 * returned unlocked. Otherwise the parent directory is not returned. If
 * the target of the pathname exists and LOCKLEAF is or'ed into the flag
 * the target is returned locked, otherwise it is returned unlocked.
 * When creating or renaming and LOCKPARENT is specified, the target may not
 * be ".".  When deleting and LOCKPARENT is specified, the target may be ".".
 * 
 * Overall outline of lookup:
 *
 * dirloop:
 *	identify next component of name at ndp->ni_ptr
 *	handle degenerate case where name is null string
 *	if .. and crossing mount points and on mounted filesys, find parent
 *	call VNOP_LOOKUP routine for next component name
 *	    directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set
 *	    component vnode returned in ni_vp (if it exists), locked.
 *	if result vnode is mounted on and crossing mount points,
 *	    find mounted on vnode
 *	if more components of name, do next level at dirloop
 *	return the answer in ni_vp, locked if LOCKLEAF set
 *	    if LOCKPARENT set, return locked parent in ni_dvp
 *	    if WANTPARENT set, return unlocked parent in ni_dvp
 *
 * Returns:	0			Success
 *		ENOENT			No such file or directory
 *		EBADF			Bad file descriptor
 *		ENOTDIR			Not a directory
 *		EROFS			Read-only file system [CREATE]
 *		EISDIR			Is a directory [CREATE]
 *		cache_lookup_path:ERECYCLE  (vnode was recycled from underneath us, redrive lookup again)
 *		vnode_authorize:EROFS
 *		vnode_authorize:EACCES
 *		vnode_authorize:EPERM
 *		vnode_authorize:???
 *		VNOP_LOOKUP:ENOENT	No such file or directory
 *		VNOP_LOOKUP:EJUSTRETURN	Restart system call (INTERNAL)
 *		VNOP_LOOKUP:???
 *		VFS_ROOT:ENOTSUP
 *		VFS_ROOT:ENOENT
 *		VFS_ROOT:???
 */
int
lookup(struct nameidata *ndp)
{
	char	*cp;		/* pointer into pathname argument */
	vnode_t		tdp;		/* saved dp */
	vnode_t		dp;		/* the directory we are searching */
	int docache = 1;		/* == 0 do not cache last component */
	int wantparent;			/* 1 => wantparent or lockparent flag */
	int rdonly;			/* lookup read-only flag bit */
	int dp_authorized = 0;
	int error = 0;
	struct componentname *cnp = &ndp->ni_cnd;
	vfs_context_t ctx = cnp->cn_context;
	int vbusyflags = 0;
	int nc_generation = 0;
	vnode_t last_dp = NULLVP;
	int keep_going;
	int atroot;

	/*
	 * Setup: break out flag bits into variables.
	 */
	if (cnp->cn_flags & (NOCACHE | DOWHITEOUT)) {
	        if ((cnp->cn_flags & NOCACHE) || (cnp->cn_nameiop == DELETE))
		        docache = 0;
	}
	wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);
	rdonly = cnp->cn_flags & RDONLY;
	cnp->cn_flags &= ~ISSYMLINK;
	cnp->cn_consume = 0;

	dp = ndp->ni_startdir;
	ndp->ni_startdir = NULLVP;

	if ((cnp->cn_flags & CN_NBMOUNTLOOK) != 0)
			vbusyflags = LK_NOWAIT;
	cp = cnp->cn_nameptr;

	if (*cp == '\0') {
	        if ( (vnode_getwithref(dp)) ) {
			dp = NULLVP;
		        error = ENOENT;
			goto bad;
		}
		ndp->ni_vp = dp;
		error = lookup_handle_emptyname(ndp, cnp, wantparent);
		if (error) {
			goto bad;
		}

		return 0;
	}
dirloop: 
	atroot = 0;
	ndp->ni_vp = NULLVP;

	if ( (error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp)) ) {
		dp = NULLVP;
		goto bad;
	}
	if ((cnp->cn_flags & ISLASTCN)) {
	        if (docache)
		        cnp->cn_flags |= MAKEENTRY;
	} else
	        cnp->cn_flags |= MAKEENTRY;

	dp = ndp->ni_dvp;

	if (ndp->ni_vp != NULLVP) {
	        /*
		 * if cache_lookup_path returned a non-NULL ni_vp, then we're
		 * guaranteed that dp is a VDIR, it's been authorized, and
		 * vp is not ".."
		 *
		 * make sure we don't try to enter the name back into
		 * the cache if this vp is purged before we get to that
		 * check since we won't have serialized behind whatever
		 * activity is occurring in the FS that caused the purge
		 */
	        if (dp != NULLVP)
		        nc_generation = dp->v_nc_generation - 1;

	        goto returned_from_lookup_path;
	}

	/*
	 * Handle "..": two special cases.
	 * 1. If at root directory (e.g. after chroot)
	 *    or at absolute root directory
	 *    then ignore it so can't get out.
	 * 2. If this vnode is the root of a mounted
	 *    filesystem, then replace it with the
	 *    vnode which was mounted on so we take the
	 *    .. in the other file system.
	 */
	if ( (cnp->cn_flags & ISDOTDOT) ) {
		for (;;) {
		        if (dp == ndp->ni_rootdir || dp == rootvnode) {
			        ndp->ni_dvp = dp;
				ndp->ni_vp = dp;
				/*
				 * we're pinned at the root
				 * we've already got one reference on 'dp'
				 * courtesy of cache_lookup_path... take
				 * another one for the ".."
				 * if we fail to get the new reference, we'll
				 * drop our original down in 'bad'
				 */
				if ( (vnode_get(dp)) ) {
					error = ENOENT;
					goto bad;
				}
				atroot = 1;
				goto returned_from_lookup_path;
			}
			if ((dp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
			        break;
			if (dp->v_mount == NULL) {	/* forced umount */
			        error = EBADF;
				goto bad;
			}
			tdp = dp;
			dp = tdp->v_mount->mnt_vnodecovered;

			vnode_put(tdp);

			if ( (vnode_getwithref(dp)) ) {
			        dp = NULLVP;
				error = ENOENT;
				goto bad;
			}
			ndp->ni_dvp = dp;
			dp_authorized = 0;
		}
	}

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
unionlookup:
	ndp->ni_vp = NULLVP;

	if (dp->v_type != VDIR) {
	        error = ENOTDIR;
	        goto lookup_error;
	}
	if ( (cnp->cn_flags & DONOTAUTH) != DONOTAUTH ) {
		error = lookup_authorize_search(dp, cnp, dp_authorized, ctx);
		if (error) {
			goto lookup_error;
		}
	}

	/*
	 * Now that we've authorized a lookup, can bail out if the filesystem
	 * will be doing a batched operation.  Return an iocount on dvp.
	 */
#if NAMEDRSRCFORK
	if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp) && !(cnp->cn_flags & CN_WANTSRSRCFORK)) {
#else 
	if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp)) {
#endif /* NAMEDRSRCFORK */
		ndp->ni_flag |= NAMEI_UNFINISHED;
		ndp->ni_ncgeneration = dp->v_nc_generation;
		return 0;
	}

	nc_generation = dp->v_nc_generation;

	error = VNOP_LOOKUP(dp, &ndp->ni_vp, cnp, ctx);


	if ( error ) {
lookup_error:
		if ((error == ENOENT) &&
		    (dp->v_flag & VROOT) && (dp->v_mount != NULL) &&
		    (dp->v_mount->mnt_flag & MNT_UNION)) {
#if CONFIG_VFS_FUNNEL
		        if ((cnp->cn_flags & FSNODELOCKHELD)) {
			        cnp->cn_flags &= ~FSNODELOCKHELD;
				unlock_fsnode(dp, NULL);
			}	
#endif /* CONFIG_VFS_FUNNEL */
			tdp = dp;
			dp = tdp->v_mount->mnt_vnodecovered;

			vnode_put(tdp);

			if ( (vnode_getwithref(dp)) ) {
			        dp = NULLVP;
				error = ENOENT;
				goto bad;
			}
			ndp->ni_dvp = dp;
			dp_authorized = 0;
			goto unionlookup;
		}

		if (error != EJUSTRETURN)
			goto bad;

		if (ndp->ni_vp != NULLVP)
			panic("leaf should be empty");

		error = lookup_validate_creation_path(ndp);
		if (error)
			goto bad;
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * referenced directory vnode in ndp->ni_dvp.
		 */
		if (cnp->cn_flags & SAVESTART) {
			if ( (vnode_get(ndp->ni_dvp)) ) {
				error = ENOENT;
				goto bad;
			}
			ndp->ni_startdir = ndp->ni_dvp;
		}
		if (!wantparent)
		        vnode_put(ndp->ni_dvp);

		if (kdebug_enable)
		        kdebug_lookup(ndp->ni_dvp, cnp);
		return (0);
	}
returned_from_lookup_path:
	/* We'll always have an iocount on ni_vp when this finishes. */
	error = lookup_handle_found_vnode(ndp, cnp, rdonly, vbusyflags, &keep_going, nc_generation, wantparent, atroot, ctx);
	if (error != 0) {
		goto bad2; 
	}

	if (keep_going) {
		dp = ndp->ni_vp;

		/* namei() will handle symlinks */
		if ((dp->v_type == VLNK) &&
				((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) {
			return 0; 
		}

		/*
		 * Otherwise, there's more path to process.  
		 * cache_lookup_path is now responsible for dropping io ref on dp
		 * when it is called again in the dirloop.  This ensures we hold
		 * a ref on dp until we complete the next round of lookup.
		 */
		last_dp = dp;

		goto dirloop;
	}

	return (0);
bad2:
#if CONFIG_VFS_FUNNEL
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
	        cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}
#endif /* CONFIG_VFS_FUNNEL */
	if (ndp->ni_dvp)
		vnode_put(ndp->ni_dvp);

	vnode_put(ndp->ni_vp);
	ndp->ni_vp = NULLVP;

	if (kdebug_enable)
	        kdebug_lookup(dp, cnp);
	return (error);

bad:
#if CONFIG_VFS_FUNNEL
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
	        cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}	
#endif /* CONFIG_VFS_FUNNEL */
	if (dp)
	        vnode_put(dp);
	ndp->ni_vp = NULLVP;

	if (kdebug_enable)
	        kdebug_lookup(dp, cnp);
	return (error);
}

int 
lookup_validate_creation_path(struct nameidata *ndp)
{
	struct componentname *cnp = &ndp->ni_cnd;

	/*
	 * If creating and at end of pathname, then can consider
	 * allowing file to be created.
	 */
	if (cnp->cn_flags & RDONLY) {
		return EROFS;
	}
	if ((cnp->cn_flags & ISLASTCN) && (ndp->ni_flag & NAMEI_TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) {
		return ENOENT;
	}
	
	return 0;
}

/*
 * Modifies only ni_vp.  Always returns with ni_vp still valid (iocount held).
 */
int
lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, 
		int vbusyflags, vfs_context_t ctx)
{
	mount_t mp;
	vnode_t tdp;
	int error = 0;
	uthread_t uth;
	uint32_t depth = 0;
	int dont_cache_mp = 0;
	vnode_t	mounted_on_dp;
	int current_mount_generation = 0;
	
	mounted_on_dp = dp;
	current_mount_generation = mount_generation;

	while ((dp->v_type == VDIR) && dp->v_mountedhere &&
			((cnp->cn_flags & NOCROSSMOUNT) == 0)) {
#if CONFIG_TRIGGERS
		/*
		 * For a trigger vnode, call its resolver when crossing its mount (if requested)
		 */
		if (dp->v_resolve) {
			(void) vnode_trigger_resolve(dp, ndp, ctx);
		}
#endif
		vnode_lock(dp);

		if ((dp->v_type == VDIR) && (mp = dp->v_mountedhere)) {

			mp->mnt_crossref++;
			vnode_unlock(dp);


			if (vfs_busy(mp, vbusyflags)) {
				mount_dropcrossref(mp, dp, 0);
				if (vbusyflags == LK_NOWAIT) {
					error = ENOENT;
					goto out;
				}

				continue;
			}


			/*
			 * XXX - if this is the last component of the
			 * pathname, and it's either not a lookup operation
			 * or the NOTRIGGER flag is set for the operation,
			 * set a uthread flag to let VFS_ROOT() for autofs
			 * know it shouldn't trigger a mount.
			 */
			uth = (struct uthread *)get_bsdthread_info(current_thread());
			if ((cnp->cn_flags & ISLASTCN) &&
					(cnp->cn_nameiop != LOOKUP ||
					 (cnp->cn_flags & NOTRIGGER))) {
				uth->uu_notrigger = 1;
				dont_cache_mp = 1;
			}

			error = VFS_ROOT(mp, &tdp, ctx);
			/* XXX - clear the uthread flag */
			uth->uu_notrigger = 0;

			mount_dropcrossref(mp, dp, 0);
			vfs_unbusy(mp);

			if (error) {
				goto out;
			}

			vnode_put(dp);
			ndp->ni_vp = dp = tdp;
			depth++;

#if CONFIG_TRIGGERS
			/*
			 * Check if root dir is a trigger vnode
			 */
			if (dp->v_resolve) {
				error = vnode_trigger_resolve(dp, ndp, ctx);
				if (error) {
					goto out;
				}
			}
#endif			

		} else { 
			vnode_unlock(dp);
			break;
		}
	}

	if (depth && !dont_cache_mp) {
	        mp = mounted_on_dp->v_mountedhere;

		if (mp) {
		        mount_lock_spin(mp);
			mp->mnt_realrootvp_vid = dp->v_id;
			mp->mnt_realrootvp = dp;
			mp->mnt_generation = current_mount_generation;
			mount_unlock(mp);
		}
	}

	return 0;

out:
	return error;
}
Code example #28
File: fuse_vnops.c  Project: ele7enxxh/dtrace-pf
/*
    struct vnop_lookup_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
    };
*/
int
fuse_vnop_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct ucred *cred = cnp->cn_cred;

	int nameiop = cnp->cn_nameiop;
	int flags = cnp->cn_flags;
	int wantparent = flags & (LOCKPARENT | WANTPARENT);
	int islastcn = flags & ISLASTCN;
	struct mount *mp = vnode_mount(dvp);

	int err = 0;
	int lookup_err = 0;
	struct vnode *vp = NULL;

	struct fuse_dispatcher fdi;
	enum fuse_opcode op;

	uint64_t nid;
	struct fuse_access_param facp;

	FS_DEBUG2G("parent_inode=%ju - %*s\n",
	    (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

	if (fuse_isdeadfs(dvp)) {
		*vpp = NULL;
		return ENXIO;
	}
	if (!vnode_isdir(dvp)) {
		return ENOTDIR;
	}
	if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
		return EROFS;
	}
	/*
         * We do access check prior to doing anything else only in the case
         * when we are at fs root (we'd like to say, "we are at the first
         * component", but that's not exactly the same... nevermind).
         * See further comments at further access checks.
         */

	bzero(&facp, sizeof(facp));
	if (vnode_isvroot(dvp)) {	/* early permission check hack */
		if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
			return err;
		}
	}
	if (flags & ISDOTDOT) {
		nid = VTOFUD(dvp)->parent_nid;
		if (nid == 0) {
			return ENOENT;
		}
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
		nid = VTOI(dvp);
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (fuse_lookup_cache_enable) {
		err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
		switch (err) {

		case -1:		/* positive match */
			atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
			return 0;

		case 0:		/* no match in cache */
			atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
			break;

		case ENOENT:		/* negative match */
			/* fall through */
		default:
			return err;
		}
	}
	nid = VTOI(dvp);
	fdisp_init(&fdi, cnp->cn_namelen + 1);
	op = FUSE_LOOKUP;

calldaemon:
	fdisp_make(&fdi, op, mp, nid, td, cred);

	if (op == FUSE_LOOKUP) {
		memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
		((char *)fdi.indata)[cnp->cn_namelen] = '\0';
	}
	lookup_err = fdisp_wait_answ(&fdi);

	if ((op == FUSE_LOOKUP) && !lookup_err) {	/* lookup call succeeded */
		nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
		if (!nid) {
			/*
	                 * zero nodeid is the same as "not found",
	                 * but it's also cacheable (which we keep
	                 * not doing as of this writing)
	                 */
			lookup_err = ENOENT;
		} else if (nid == FUSE_ROOT_ID) {
			lookup_err = EINVAL;
		}
	}
	if (lookup_err &&
	    (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
		fdisp_destroy(&fdi);
		return lookup_err;
	}
	/* lookup_err, if non-zero, must be ENOENT at this point */

	if (lookup_err) {

		if ((nameiop == CREATE || nameiop == RENAME) && islastcn
		     /* && directory dvp has not been removed */ ) {

			if (vfs_isrdonly(mp)) {
				err = EROFS;
				goto out;
			}
#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
	                 * Possibly record the position of a slot in the
	                 * directory large enough for the new component name.
	                 * This can be recorded in the vnode private data for
	                 * dvp. Set the SAVENAME flag to hold onto the
	                 * pathname for use later in VOP_CREATE or VOP_RENAME.
	                 */
			cnp->cn_flags |= SAVENAME;

			err = EJUSTRETURN;
			goto out;
		}
		/* Consider inserting name into cache. */

		/*
	         * No, we can't use negative caching, as the fs
	         * changes are out of our control.
	         * False positives' falseness turns out just as things
	         * go by, but false negatives' falseness doesn't.
	         * (and aiding the caching mechanism with extra control
	         * mechanisms comes quite close to defeating the whole
	         * purpose of caching...)
	         */
#if 0
		if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
			FS_DEBUG("inserting NULL into cache\n");
			cache_enter(dvp, NULL, cnp);
		}
#endif
		err = ENOENT;
		goto out;

	} else {

		/* !lookup_err */

		struct fuse_entry_out *feo = NULL;
		struct fuse_attr *fattr = NULL;

		if (op == FUSE_GETATTR) {
			fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
		} else {
			feo = (struct fuse_entry_out *)fdi.answ;
			fattr = &(feo->attr);
		}

		/*
	         * If deleting, and at end of pathname, return parameters
	         * which can be used to remove file.  If the wantparent flag
	         * isn't set, we return only the directory, otherwise we go on
	         * and lock the inode, being careful with ".".
	         */
		if (nameiop == DELETE && islastcn) {
			/*
	                 * Check for write access on directory.
	                 */
			facp.xuid = fattr->uid;
			facp.facc_flags |= FACCESS_STICKY;
			err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
			facp.facc_flags &= ~FACCESS_XQUERIES;

			if (err) {
				goto out;
			}
			if (nid == VTOI(dvp)) {
				vref(dvp);
				*vpp = dvp;
			} else {
				err = fuse_vnode_get(dvp->v_mount, nid, dvp,
				    &vp, cnp, IFTOVT(fattr->mode));
				if (err)
					goto out;
				*vpp = vp;
			}

			/*
			 * Save the name for use in VOP_RMDIR and VOP_REMOVE
			 * later.
			 */
			cnp->cn_flags |= SAVENAME;
			goto out;

		}
		/*
	         * If rewriting (RENAME), return the inode and the
	         * information required to rewrite the present directory
	         * Must get inode of directory entry to verify it's a
	         * regular file, or empty directory.
	         */
		if (nameiop == RENAME && wantparent && islastcn) {

#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
	                 * Check for "."
	                 */
			if (nid == VTOI(dvp)) {
				err = EISDIR;
				goto out;
			}
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			*vpp = vp;
			/*
	                 * Save the name for use in VOP_RENAME later.
	                 */
			cnp->cn_flags |= SAVENAME;

			goto out;
		}
		if (flags & ISDOTDOT) {
			struct mount *mp;
			int ltype;

			/*
			 * Expanded copy of vn_vget_ino() so that
			 * fuse_vnode_get() can be used.
			 */
			mp = dvp->v_mount;
			ltype = VOP_ISLOCKED(dvp);
			err = vfs_busy(mp, MBF_NOWAIT);
			if (err != 0) {
				vfs_ref(mp);
				VOP_UNLOCK(dvp, 0);
				err = vfs_busy(mp, 0);
				vn_lock(dvp, ltype | LK_RETRY);
				vfs_rel(mp);
				if (err)
					goto out;
				if ((dvp->v_iflag & VI_DOOMED) != 0) {
					err = ENOENT;
					vfs_unbusy(mp);
					goto out;
				}
			}
			VOP_UNLOCK(dvp, 0);
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    NULL,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			vfs_unbusy(mp);
			vn_lock(dvp, ltype | LK_RETRY);
			if ((dvp->v_iflag & VI_DOOMED) != 0) {
				if (err == 0)
					vput(vp);
				err = ENOENT;
			}
			if (err)
				goto out;
			*vpp = vp;
		} else if (nid == VTOI(dvp)) {
			vref(dvp);
			*vpp = dvp;
		} else {
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			fuse_vnode_setparent(vp, dvp);
			*vpp = vp;
		}

		if (op == FUSE_GETATTR) {
			cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
		} else {
			cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
		}

		/* Insert name into cache if appropriate. */

		/*
	         * Nooo, caching is evil. With caching, we can't avoid stale
	         * information taking over the playground (cached info is not
	         * just positive/negative, it does have qualitative aspects,
	         * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
	         * walking down along cached path components, and that's not
	         * any cheaper than FUSE_LOOKUP. This might change with
	         * implementing kernel side attr caching, but... In Linux,
	         * lookup results are not cached, and the daemon is bombarded
	         * with FUSE_LOOKUPS on and on. This shows that by design, the
	         * daemon is expected to handle frequent lookup queries
	         * efficiently, do its caching in userspace, and so on.
	         *
	         * So just leave the name cache alone.
	         */

		/*
	         * Well, now I know, Linux caches lookups, but with a
	         * timeout... So it's the same thing as attribute caching:
	         * we can deal with it when implement timeouts.
	         */
#if 0
		if (cnp->cn_flags & MAKEENTRY) {
			cache_enter(dvp, *vpp, cnp);
		}
#endif
	}
out:
	if (!lookup_err) {

		/* No lookup error; need to clean up. */

		if (err) {		/* Found inode; exit with no vnode. */
			if (op == FUSE_LOOKUP) {
				fuse_internal_forget_send(vnode_mount(dvp), td, cred,
				    nid, 1);
			}
			fdisp_destroy(&fdi);
			return err;
		} else {
#ifndef NO_EARLY_PERM_CHECK_HACK
			if (!islastcn) {
				/*
				 * We have the attributes of the next item
				 * *now*, and it's a fact, and we do not
				 * have to do extra work for it (ie, beg the
				 * daemon), and it neither depends on such
				 * accidental things like attr caching. So
				 * the big idea: check credentials *now*,
				 * not at the beginning of the next call to
				 * lookup.
				 * 
				 * The first item of the lookup chain (fs root)
				 * won't be checked then here, of course, as
				 * it's never "the next". But go and see that
				 * the root is taken care about at the very
				 * beginning of this function.
				 * 
				 * Now, given we want to do the access check
				 * this way, one might ask: so then why not
				 * do the access check just after fetching
				 * the inode and its attributes from the
				 * daemon? Why bother with producing the
				 * corresponding vnode at all if something
				 * is not OK? We know what's the deal as
				 * soon as we get those attrs... There is
				 * one bit of info though not given us by
				 * the daemon: whether his response is
				 * authoritative or not... His response should
				 * be ignored if something is mounted over
				 * the dir in question. But that can be
				 * known only by having the vnode...
				 */
				int tmpvtype = vnode_vtype(*vpp);

				bzero(&facp, sizeof(facp));
				/* the early perm check hack */
				facp.facc_flags |= FACCESS_VA_VALID;

				if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
					err = ENOTDIR;
				}
				if (!err && !vnode_mountedhere(*vpp)) {
					err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
				}
				if (err) {
					if (tmpvtype == VLNK)
						FS_DEBUG("weird, permission error with a symlink?\n");
					vput(*vpp);
					*vpp = NULL;
				}
			}
#endif
		}
	}
	fdisp_destroy(&fdi);

	return err;
}
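The FUSE request/response plumbing that fuse_vnop_lookup() drives through the fdisp_* calls is easier to see in isolation. Below is a hedged sketch of a bare FUSE_LOOKUP round trip using only the dispatcher calls that appear above; the helper name and its signature are assumptions made for illustration.

/*
 * Sketch only: a bare FUSE_LOOKUP round trip, with all of the
 * nameiop-specific handling stripped out.  The helper name and its
 * signature are illustrative, not part of the driver above.
 */
static int
fuse_lookup_roundtrip(struct vnode *dvp, struct componentname *cnp,
    struct thread *td, struct ucred *cred, uint64_t *nidp)
{
	struct fuse_dispatcher fdi;
	struct fuse_entry_out *feo;
	int err;

	/* Reserve room for the component name plus its NUL terminator. */
	fdisp_init(&fdi, cnp->cn_namelen + 1);
	fdisp_make(&fdi, FUSE_LOOKUP, vnode_mount(dvp), VTOI(dvp), td, cred);

	/* The request body of FUSE_LOOKUP is just the name being resolved. */
	memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
	((char *)fdi.indata)[cnp->cn_namelen] = '\0';

	/* Ship the request to the userspace daemon and wait for its answer. */
	err = fdisp_wait_answ(&fdi);
	if (err == 0) {
		feo = (struct fuse_entry_out *)fdi.answ;
		*nidp = feo->nodeid;	/* a zero nodeid means "not found" */
	}
	fdisp_destroy(&fdi);
	return (err);
}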
Code example #29
File: vfs_lookup.c  Project: 7shi/openbsd-loongson-vc
/*
 * Search a pathname.
 * This is a very central and rather complicated routine.
 *
 * The pathname is pointed to by ni_cnd.cn_nameptr and is of length
 * ni_pathlen.  The starting directory is taken from ni_startdir. The
 * pathname is descended until done, or a symbolic link is encountered.
 * If the path is completed the flag ISLASTCN is set in ni_cnd.cn_flags.
 * If a symbolic link need interpretation is encountered, the flag ISSYMLINK
 * is set in ni_cnd.cn_flags.
 *
 * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on
 * whether the name is to be looked up, created, renamed, or deleted.
 * When CREATE, RENAME, or DELETE is specified, information usable in
 * creating, renaming, or deleting a directory entry may be calculated.
 * If flag has LOCKPARENT or'ed into it, the parent directory is returned
 * locked. If flag has WANTPARENT or'ed into it, the parent directory is
 * returned unlocked. Otherwise the parent directory is not returned. If
 * the target of the pathname exists and LOCKLEAF is or'ed into the flag
 * the target is returned locked, otherwise it is returned unlocked.
 * When creating or renaming and LOCKPARENT is specified, the target may not
 * be ".".  When deleting and LOCKPARENT is specified, the target may be ".".
 *
 * Overall outline of lookup:
 *
 * dirloop:
 *	identify next component of name at ndp->ni_ptr
 *	handle degenerate case where name is null string
 *	if .. and crossing mount points and on mounted filesys, find parent
 *	call VOP_LOOKUP routine for next component name
 *	    directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set
 *	    component vnode returned in ni_vp (if it exists), locked.
 *	if result vnode is mounted on and crossing mount points,
 *	    find mounted on vnode
 *	if more components of name, do next level at dirloop
 *	return the answer in ni_vp, locked if LOCKLEAF set
 *	    if LOCKPARENT set, return locked parent in ni_dvp
 *	    if WANTPARENT set, return unlocked parent in ni_dvp
 */
int
lookup(struct nameidata *ndp)
{
	char *cp;			/* pointer into pathname argument */
	struct vnode *dp = 0;		/* the directory we are searching */
	struct vnode *tdp;		/* saved dp */
	struct mount *mp;		/* mount table entry */
	int docache;			/* == 0 do not cache last component */
	int wantparent;			/* 1 => wantparent or lockparent flag */
	int rdonly;			/* lookup read-only flag bit */
	int error = 0;
	int dpunlocked = 0;		/* dp has already been unlocked */
	int slashes;
	struct componentname *cnp = &ndp->ni_cnd;
	struct proc *p = cnp->cn_proc;
	/*
	 * Setup: break out flag bits into variables.
	 */
	wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);
	docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
	if (cnp->cn_nameiop == DELETE ||
	    (wantparent && cnp->cn_nameiop != CREATE))
		docache = 0;
	rdonly = cnp->cn_flags & RDONLY;
	ndp->ni_dvp = NULL;
	cnp->cn_flags &= ~ISSYMLINK;
	dp = ndp->ni_startdir;
	ndp->ni_startdir = NULLVP;
	vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);

	/*
	 * If we have a leading string of slashes, remove them, and just make
	 * sure the current node is a directory.
	 */
	cp = cnp->cn_nameptr;
	if (*cp == '/') {
		do {
			cp++;
		} while (*cp == '/');
		ndp->ni_pathlen -= cp - cnp->cn_nameptr;
		cnp->cn_nameptr = cp;

		if (dp->v_type != VDIR) {
			error = ENOTDIR;
			goto bad;
		}

		/*
		 * If we've exhausted the path name, then just return the
		 * current node.  If the caller requested the parent node (i.e.
		 * it's a CREATE, DELETE, or RENAME), and we don't have one
		 * (because this is the root directory), then we must fail.
		 */
		if (cnp->cn_nameptr[0] == '\0') {
			if (ndp->ni_dvp == NULL && wantparent) {
				error = EISDIR;
				goto bad;
			}
			ndp->ni_vp = dp;
			cnp->cn_flags |= ISLASTCN;
			goto terminal;
		}
	}

dirloop:
	/*
	 * Search a new directory.
	 *
	 * The cn_hash value is for use by vfs_cache.
	 * The last component of the filename is left accessible via
	 * cnp->cn_nameptr for callers that need the name. Callers needing
	 * the name set the SAVENAME flag. When done, they assume
	 * responsibility for freeing the pathname buffer.
	 */
	cp = NULL;
	cnp->cn_consume = 0;
	cnp->cn_hash = hash32_stre(cnp->cn_nameptr, '/', &cp, HASHINIT);
	cnp->cn_namelen = cp - cnp->cn_nameptr;
	if (cnp->cn_namelen > NAME_MAX) {
		error = ENAMETOOLONG;
		goto bad;
	}
#ifdef NAMEI_DIAGNOSTIC
	{ char c = *cp;
	*cp = '\0';
	printf("{%s}: ", cnp->cn_nameptr);
	*cp = c; }
#endif
	ndp->ni_pathlen -= cnp->cn_namelen;
	ndp->ni_next = cp;
	/*
	 * If this component is followed by a slash, then move the pointer to
	 * the next component forward, and remember that this component must be
	 * a directory.
	 */
	if (*cp == '/') {
		do {
			cp++;
		} while (*cp == '/');
		slashes = cp - ndp->ni_next;
		ndp->ni_pathlen -= slashes;
		ndp->ni_next = cp;
		cnp->cn_flags |= REQUIREDIR;
	} else {
		slashes = 0;
		cnp->cn_flags &= ~REQUIREDIR;
	}
	/*
	 * We do special processing on the last component, whether or not it's
	 * a directory.  Cache all intervening lookups, but not the final one.
	 */
	if (*cp == '\0') {
		if (docache)
			cnp->cn_flags |= MAKEENTRY;
		else
			cnp->cn_flags &= ~MAKEENTRY;
		cnp->cn_flags |= ISLASTCN;
	} else {
		cnp->cn_flags |= MAKEENTRY;
		cnp->cn_flags &= ~ISLASTCN;
	}
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.')
		cnp->cn_flags |= ISDOTDOT;
	else
		cnp->cn_flags &= ~ISDOTDOT;

	/*
	 * Handle "..": two special cases.
	 * 1. If at root directory (e.g. after chroot)
	 *    or at absolute root directory
	 *    then ignore it so can't get out.
	 * 2. If this vnode is the root of a mounted
	 *    filesystem, then replace it with the
	 *    vnode which was mounted on so we take the
	 *    .. in the other file system.
	 */
	if (cnp->cn_flags & ISDOTDOT) {
		for (;;) {
			if (dp == ndp->ni_rootdir || dp == rootvnode) {
				ndp->ni_dvp = dp;
				ndp->ni_vp = dp;
				vref(dp);
				goto nextname;
			}
			if ((dp->v_flag & VROOT) == 0 ||
			    (cnp->cn_flags & NOCROSSMOUNT))
				break;
			tdp = dp;
			dp = dp->v_mount->mnt_vnodecovered;
			vput(tdp);
			vref(dp);
			vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
		}
	}

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
	ndp->ni_dvp = dp;
	ndp->ni_vp = NULL;
	cnp->cn_flags &= ~PDIRUNLOCK;

	if ((error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp)) != 0) {
#ifdef DIAGNOSTIC
		if (ndp->ni_vp != NULL)
			panic("leaf should be empty");
#endif
#ifdef NAMEI_DIAGNOSTIC
		printf("not found\n");
#endif
		if (error != EJUSTRETURN)
			goto bad;
		/*
		 * If this was not the last component, or there were trailing
		 * slashes, then the name must exist.
		 */
		if (cnp->cn_flags & REQUIREDIR) {
			error = ENOENT;
			goto bad;
		}
		/*
		 * If creating and at end of pathname, then can consider
		 * allowing file to be created.
		 */
		if (rdonly || (ndp->ni_dvp->v_mount->mnt_flag & MNT_RDONLY)) {
			error = EROFS;
			goto bad;
		}
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * (possibly locked) directory inode in ndp->ni_dvp.
		 */
		if (cnp->cn_flags & SAVESTART) {
			ndp->ni_startdir = ndp->ni_dvp;
			vref(ndp->ni_startdir);
		}
		return (0);
	}
#ifdef NAMEI_DIAGNOSTIC
	printf("found\n");
#endif

	/*
	 * Take into account any additional components consumed by the
	 * underlying filesystem.  This will include any trailing slashes after
	 * the last component consumed.
	 */
	if (cnp->cn_consume > 0) {
		if (cnp->cn_consume >= slashes) {
			cnp->cn_flags &= ~REQUIREDIR;
		}

		ndp->ni_pathlen -= cnp->cn_consume - slashes;
		ndp->ni_next += cnp->cn_consume - slashes;
		cnp->cn_consume = 0;
		if (ndp->ni_next[0] == '\0')
			cnp->cn_flags |= ISLASTCN;
	}

	dp = ndp->ni_vp;
	/*
	 * Check to see if the vnode has been mounted on;
	 * if so find the root of the mounted file system.
	 */
	while (dp->v_type == VDIR && (mp = dp->v_mountedhere) &&
	    (cnp->cn_flags & NOCROSSMOUNT) == 0) {
		if (vfs_busy(mp, VB_READ|VB_WAIT))
			continue;
		VOP_UNLOCK(dp, 0, p);
		error = VFS_ROOT(mp, &tdp);
		vfs_unbusy(mp);
		if (error) {
			dpunlocked = 1;
			goto bad2;
		}
		vrele(dp);
		ndp->ni_vp = dp = tdp;
	}

	/*
	 * Check for symbolic link.  Back up over any slashes that we skipped,
	 * as we will need them again.
	 */
	if ((dp->v_type == VLNK) && (cnp->cn_flags & (FOLLOW|REQUIREDIR))) {
		ndp->ni_pathlen += slashes;
		ndp->ni_next -= slashes;
		cnp->cn_flags |= ISSYMLINK;
		return (0);
	}

	/*
	 * Check for directory, if the component was followed by a series of
	 * slashes.
	 */
	if ((dp->v_type != VDIR) && (cnp->cn_flags & REQUIREDIR)) {
		error = ENOTDIR;
		goto bad2;
	}

nextname:
	/*
	 * Not a symbolic link.  If this was not the last component, then
	 * continue at the next component, else return.
	 */
	if (!(cnp->cn_flags & ISLASTCN)) {
		cnp->cn_nameptr = ndp->ni_next;
		vrele(ndp->ni_dvp);
		goto dirloop;
	}

terminal:
	/*
	 * Check for read-only file systems.
	 */
	if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) {
		/*
		 * Disallow directory write attempts on read-only
		 * file systems.
		 */
		if (rdonly || (dp->v_mount->mnt_flag & MNT_RDONLY) ||
		    (wantparent &&
		    (ndp->ni_dvp->v_mount->mnt_flag & MNT_RDONLY))) {
			error = EROFS;
			goto bad2;
		}
	}
	if (ndp->ni_dvp != NULL) {
		if (cnp->cn_flags & SAVESTART) {
			ndp->ni_startdir = ndp->ni_dvp;
			vref(ndp->ni_startdir);
		}
		if (!wantparent)
			vrele(ndp->ni_dvp);
	}
	if ((cnp->cn_flags & LOCKLEAF) == 0)
		VOP_UNLOCK(dp, 0, p);
	return (0);

bad2:
	if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) &&
	    ((cnp->cn_flags & PDIRUNLOCK) == 0))
		VOP_UNLOCK(ndp->ni_dvp, 0, p);
	vrele(ndp->ni_dvp);
bad:
	if (dpunlocked)
		vrele(dp);
	else
		vput(dp);
	ndp->ni_vp = NULL;
	return (error);
}
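The "identify next component" step from the outline comment above is compact enough to pull out on its own. The sketch below condenses the component-splitting logic of the dirloop; the helper name is hypothetical and the code only rearranges what the function already does.

/*
 * Sketch only: carve the next component out of the remaining pathname,
 * as the dirloop does.  The helper name is hypothetical.
 */
static int
split_next_component(struct nameidata *ndp, struct componentname *cnp)
{
	char *cp = NULL;

	/* Hash up to the next '/' (or NUL); cp is left at the terminator. */
	cnp->cn_hash = hash32_stre(cnp->cn_nameptr, '/', &cp, HASHINIT);
	cnp->cn_namelen = cp - cnp->cn_nameptr;
	if (cnp->cn_namelen > NAME_MAX)
		return (ENAMETOOLONG);
	ndp->ni_pathlen -= cnp->cn_namelen;
	ndp->ni_next = cp;

	/* Trailing slashes force the component to resolve to a directory. */
	if (*cp == '/') {
		while (*cp == '/')
			cp++;
		ndp->ni_pathlen -= cp - ndp->ni_next;
		ndp->ni_next = cp;
		cnp->cn_flags |= REQUIREDIR;
	} else
		cnp->cn_flags &= ~REQUIREDIR;

	/* Flag the final component, and ".." for the dot-dot special cases. */
	if (*cp == '\0')
		cnp->cn_flags |= ISLASTCN;
	else
		cnp->cn_flags &= ~ISLASTCN;
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.')
		cnp->cn_flags |= ISDOTDOT;
	else
		cnp->cn_flags &= ~ISDOTDOT;
	return (0);
}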
Code example #30
File: devfs_vfsops.c  Project: 0xffea/xnu
/*
 * Function: devfs_kernel_mount
 * Purpose:
 *   Mount devfs at the given mount point from within the kernel.
 */
int
devfs_kernel_mount(char * mntname)
{
	struct mount *mp;
	int error;
	struct nameidata nd;
	struct vnode  * vp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfstable *vfsp;

	/* Find our vfstable entry */
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
		if (!strncmp(vfsp->vfc_name, "devfs", sizeof(vfsp->vfc_name)))
			break;
	
	if (!vfsp) {
		panic("Could not find entry in vfsconf for devfs.\n");
	} 

	/*
	 * Get vnode to be covered
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    CAST_USER_ADDR_T(mntname), ctx);
	if ((error = namei(&nd))) {
	    printf("devfs_kernel_mount: failed to find directory '%s', %d", 
		   mntname, error);
	    return (error);
	}
	nameidone(&nd);
	vp = nd.ni_vp;

	if ((error = VNOP_FSYNC(vp, MNT_WAIT, ctx))) {
	    printf("devfs_kernel_mount: vnop_fsync failed: %d\n", error);
	    vnode_put(vp);
	    return (error);
	}
	if ((error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0))) {
	    printf("devfs_kernel_mount: buf_invalidateblks failed: %d\n", error);
	    vnode_put(vp);
	    return (error);
	}
	if (vnode_isdir(vp) == 0) {
	    printf("devfs_kernel_mount: '%s' is not a directory\n", mntname);
	    vnode_put(vp);
	    return (ENOTDIR);
	}
	if ((vnode_mountedhere(vp))) {
	    vnode_put(vp);
	    return (EBUSY);
	}

	/*
	 * Allocate and initialize the filesystem.
	 */
	MALLOC_ZONE(mp, struct mount *, sizeof(struct mount),
		M_MOUNT, M_WAITOK);
	bzero((char *)mp, sizeof(struct mount));

	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

	mount_lock_init(mp);
	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	(void)vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = &devfs_vfsops;
	mp->mnt_vtable = vfsp;
	mp->mnt_flag = 0;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	vp->v_mountedhere = mp;
	mp->mnt_vnodecovered = vp;
	mp->mnt_vfsstat.f_owner = kauth_cred_getuid(kauth_cred_get());
	(void) copystr(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN - 1, 0);
#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(ctx, mp);
#endif

	error = devfs_mount(mp, NULL, USER_ADDR_NULL, ctx);

	if (error) {
	    printf("devfs_kernel_mount: mount %s failed: %d", mntname, error);
	    mp->mnt_vtable->vfc_refcount--;

	    vfs_unbusy(mp);

	    mount_lock_destroy(mp);
#if CONFIG_MACF
	    mac_mount_label_destroy(mp);
#endif
	    FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
	    vnode_put(vp);
	    return (error);
	}
	vnode_ref(vp);
	vnode_put(vp);
	vfs_unbusy(mp);
	mount_list_add(mp);
	return (0);
}
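For context, devfs_kernel_mount() exists so the kernel can bring devfs up without going through the mount(2) path. A hedged sketch of a call site follows; the surrounding boot code and error handling are assumptions, not taken from the example above.

	/* Sketch only: mounting devfs on /dev from kernel context at boot. */
	int error;

	error = devfs_kernel_mount("/dev");
	if (error != 0)
		printf("devfs_kernel_mount(\"/dev\") failed: %d\n", error);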