Example no. 1
0
static int
msdosfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *nvp;
	struct thread *td;
	struct denode *dep;
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	int error, allerror = 0;

	td = curthread;

	/*
	 * If we ever switch to not updating all of the fats all the time,
	 * this would be the place to update them from the first one.
	 */
	if (pmp->pm_fmod != 0) {
		if (pmp->pm_flags & MSDOSFSMNT_RONLY)
			panic("msdosfs_sync: rofs mod");
		else {
			/* update fats here */
		}
	}
	/*
	 * Write back each (modified) denode.
	 */
	MNT_ILOCK(mp);
loop:
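	/*
	 * Iterate over the vnodes of this mount.  The mount interlock is
	 * dropped while each vnode is examined and re-acquired before the
	 * next iteration; if vget() reports that a vnode has been recycled
	 * (ENOENT), the scan restarts from "loop".
	 */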
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		dep = VTODE(vp);
		if ((dep->de_flag &
		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY)) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
		if (error) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
Example no. 2
0
/* Unmount the filesystem described by mp. */
static int
nwfs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct nwmount *nmp = VFSTONWFS(mp);
	struct ncp_conn *conn;
	int error, flags;

	NCPVODEBUG("nwfs_unmount: flags=%04x\n",mntflags);
	td = curthread;
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/* There is 1 extra root vnode reference from nwfs_mount(). */
	error = vflush(mp, 1, flags, td);
	if (error)
		return (error);
	conn = NWFSTOCONN(nmp);
	ncp_conn_puthandle(nmp->connh,NULL,0);
	if (ncp_conn_lock(conn, td, td->td_ucred,NCPM_WRITE | NCPM_EXECUTE) == 0) {
		if(ncp_conn_free(conn))
			ncp_conn_unlock(conn, td);
	}
	mp->mnt_data = NULL;
	if (nmp->m.flags & NWFS_MOUNT_HAVE_NLS)
		free(nmp->m.nls.to_lower, M_NWFSDATA);
	free(nmp, M_NWFSDATA);
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example no. 3
0
/*
 * Mount a pseudofs instance
 */
int
pfs_mount(struct pfs_info *pi, struct mount *mp)
{
	struct statfs *sbp;

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	mp->mnt_data = pi;
	vfs_getnewfsid(mp);

	sbp = &mp->mnt_stat;
	vfs_mountedfrom(mp, pi->pi_name);
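	/*
	 * Fill in fixed statfs values: a pseudo-filesystem has no backing
	 * store, so report a single block and a single file with nothing
	 * free.
	 */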
	sbp->f_bsize = PAGE_SIZE;
	sbp->f_iosize = PAGE_SIZE;
	sbp->f_blocks = 1;
	sbp->f_bfree = 0;
	sbp->f_bavail = 0;
	sbp->f_files = 1;
	sbp->f_ffree = 0;

	return (0);
}
Example no. 4
0
void
vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
    int flags __unused)
{
	struct vfsopt *opt;
	size_t namesize;
	int locked;

	if (!(locked = mtx_owned(MNT_MTX(vfsp))))
		MNT_ILOCK(vfsp);

	/*
	 * Allocate the option list on first use.  The M_WAITOK allocation
	 * may sleep, so the mount interlock is dropped around it and
	 * mnt_opt is re-checked afterwards in case another thread
	 * initialized it in the meantime.
	 */
	if (vfsp->mnt_opt == NULL) {
		void *opts;

		MNT_IUNLOCK(vfsp);
		opts = malloc(sizeof(*vfsp->mnt_opt), M_MOUNT, M_WAITOK);
		MNT_ILOCK(vfsp);
		if (vfsp->mnt_opt == NULL) {
			vfsp->mnt_opt = opts;
			TAILQ_INIT(vfsp->mnt_opt);
		} else {
			free(opts, M_MOUNT);
		}
	}

	MNT_IUNLOCK(vfsp);

	opt = malloc(sizeof(*opt), M_MOUNT, M_WAITOK);
	namesize = strlen(name) + 1;
	opt->name = malloc(namesize, M_MOUNT, M_WAITOK);
	strlcpy(opt->name, name, namesize);
	opt->pos = -1;
	opt->seen = 1;
	if (arg == NULL) {
		opt->value = NULL;
		opt->len = 0;
	} else {
		opt->len = strlen(arg) + 1;
		opt->value = malloc(opt->len, M_MOUNT, M_WAITOK);
		bcopy(arg, opt->value, opt->len);
	}

	MNT_ILOCK(vfsp);
	TAILQ_INSERT_TAIL(vfsp->mnt_opt, opt, link);
	if (!locked)
		MNT_IUNLOCK(vfsp);
}
Example no. 5
0
/* ARGSUSED */
static int
nfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, allerror = 0;

	td = curthread;

	MNT_ILOCK(mp);
	/*
	 * If a forced dismount is in progress, return from here so that
	 * the umount(2) syscall doesn't get stuck in VFS_SYNC() before
	 * calling VFS_UNMOUNT().
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		MNT_IUNLOCK(mp);
		return (EBADF);
	}

	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
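	/*
	 * Walk the mount's vnode list.  If a vnode cannot be locked, the
	 * iteration is aborted with MNT_VNODE_FOREACH_ABORT_ILOCKED() and
	 * the scan restarts from "loop".
	 */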
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		/* XXX Racy bv_cnt check. */
		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto loop;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);

		MNT_ILOCK(mp);
	}
Example no. 6
0
void
vfs_clearmntopt(vfs_t *vfsp, const char *name)
{
	int locked;

	if (!(locked = mtx_owned(MNT_MTX(vfsp))))
		MNT_ILOCK(vfsp);
	vfs_deleteopt(vfsp->mnt_opt, name);
	if (!locked)
		MNT_IUNLOCK(vfsp);
}
Example no. 7
0
/* Unmount the filesystem described by mp. */
static int
smbfs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smb_cred *scred;
	struct smb_dev *dev;
	int error, flags;

	SMBVDEBUG("smbfs_unmount: flags=%04x\n", mntflags);
	td = curthread;
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	/*
	 * Keep trying to flush the vnode list for the mount while 
	 * some are still busy and we are making progress towards
	 * making them not busy. This is needed because smbfs vnodes
	 * reference their parent directory but may appear after their
	 * parent in the list; one pass over the vnode list is not
	 * sufficient in this case.
	 */
	do {
		smp->sm_didrele = 0;
		/* There is 1 extra root vnode reference from smbfs_mount(). */
		error = vflush(mp, 1, flags, td);
	} while (error == EBUSY && smp->sm_didrele != 0);
	if (error)
		return error;
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, td->td_ucred);
	error = smb_share_lock(smp->sm_share);
	if (error)
		goto out;
	smb_share_put(smp->sm_share, scred);
	SMB_LOCK();
	dev = smp->sm_dev;
	if (!dev)
		panic("No private data for mount point");
	sdp_trydestroy(dev);
	mp->mnt_data = NULL;
	SMB_UNLOCK();
	free(smp, M_SMBFSDATA);
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
out:
	smbfs_free_scred(scred);
	return error;
}
Example no. 8
0
/*
 * Mount the filesystem
 */
static int
devfs_mount(struct mount *mp)
{
	int error;
	struct devfs_mount *fmp;
	struct vnode *rvp;

	if (devfs_unr == NULL)
		devfs_unr = new_unrhdr(0, INT_MAX, NULL);

	error = 0;

	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof *fmp, M_DEVFS, M_WAITOK | M_ZERO);
	fmp->dm_idx = alloc_unr(devfs_unr);
	sx_init(&fmp->dm_lock, "devfsmount");
	fmp->dm_holdcnt = 1;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
#ifdef MAC
	mp->mnt_flag |= MNT_MULTILABEL;
#endif
	MNT_IUNLOCK(mp);
	fmp->dm_mount = mp;
	mp->mnt_data = (void *) fmp;
	vfs_getnewfsid(mp);

	fmp->dm_rootdir = devfs_vmkdir(fmp, NULL, 0, NULL, DEVFS_ROOTINO);

	error = devfs_root(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		sx_destroy(&fmp->dm_lock);
		free_unr(devfs_unr, fmp->dm_idx);
		free(fmp, M_DEVFS);
		return (error);
	}

	VOP_UNLOCK(rvp, 0);

	vfs_mountedfrom(mp, "devfs");

	return (0);
}
Example no. 9
0
/*
 * Unmount system call.
 */
static int
ext2_unmount(struct mount *mp, int mntflags)
{
	struct ext2mount *ump;
	struct m_ext2fs *fs;
	struct csum *sump;
	int error, flags, i, ronly;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (mp->mnt_flag & MNT_ROOTFS)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	if ((error = ext2_flushfiles(mp, flags, curthread)) != 0)
		return (error);
	ump = VFSTOEXT2(mp);
	fs = ump->um_e2fs;
	ronly = fs->e2fs_ronly;
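	/*
	 * For a read-write mount, flush the cylinder-group summaries and,
	 * if the filesystem was valid when mounted, mark the on-disk
	 * superblock clean before writing it back.
	 */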
	if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) {
		if (fs->e2fs_wasvalid)
			fs->e2fs->e2fs_state |= E2FS_ISCLEAN;
		ext2_sbupdate(ump, MNT_WAIT);
	}

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(ump->um_devvp);
	sump = fs->e2fs_clustersum;
	for (i = 0; i < fs->e2fs_gcount; i++, sump++)
		free(sump->cs_sum, M_EXT2MNT);
	free(fs->e2fs_clustersum, M_EXT2MNT);
	free(fs->e2fs_maxcluster, M_EXT2MNT);
	free(fs->e2fs_gd, M_EXT2MNT);
	free(fs->e2fs_contigdirs, M_EXT2MNT);
	free(fs->e2fs, M_EXT2MNT);
	free(fs, M_EXT2MNT);
	free(ump, M_EXT2MNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example no. 10
0
afs_omount(struct mount *mp, char *path, caddr_t data, struct nameidata *ndp,
	THREAD_OR_PROC)
#endif
{
    /* ndp contains the mounted-from device.  Just ignore it.
     * we also don't care about our proc struct. */
    size_t size;

    if (mp->mnt_flag & MNT_UPDATE)
	return EINVAL;

    AFS_GLOCK();
    AFS_STATCNT(afs_mount);

    if (afs_globalVFS) {	/* Don't allow remounts. */
	AFS_GUNLOCK();
	return EBUSY;
    }

    afs_globalVFS = mp;
    mp->vfs_bsize = 8192;
    vfs_getnewfsid(mp);
#ifdef AFS_FBSD70_ENV /* XXX 70? */
    MNT_ILOCK(mp);
    mp->mnt_flag &= ~MNT_LOCAL;
    mp->mnt_kern_flag |= MNTK_MPSAFE; /* solid steel */
#endif
    mp->mnt_stat.f_iosize = 8192;

    if (path != NULL)
	copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size);
    else
	bcopy("/afs", mp->mnt_stat.f_mntonname, size = 4);
    memset(mp->mnt_stat.f_mntonname + size, 0, MNAMELEN - size);
    memset(mp->mnt_stat.f_mntfromname, 0, MNAMELEN);
    strcpy(mp->mnt_stat.f_mntfromname, "AFS");
    /* null terminated string "AFS" will fit, just leave it be. */
    strcpy(mp->mnt_stat.f_fstypename, "afs");
#ifdef AFS_FBSD70_ENV
    MNT_IUNLOCK(mp);
#endif
    AFS_GUNLOCK();
    afs_statfs(mp, &mp->mnt_stat, p);

    return 0;
}
Example no. 11
0
static int
udf_unmount(struct mount *mp, int mntflags)
{
	struct udf_mnt *udfmp;
	int error, flags = 0;

	udfmp = VFSTOUDFFS(mp);

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if ((error = vflush(mp, 0, flags, curthread)))
		return (error);

	if (udfmp->im_flags & UDFMNT_KICONV && udf_iconv) {
		if (udfmp->im_d2l)
			udf_iconv->close(udfmp->im_d2l);
#if 0
		if (udfmp->im_l2d)
			udf_iconv->close(udfmp->im_l2d);
#endif
	}

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(udfmp->im_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(udfmp->im_devvp);
	dev_rel(udfmp->im_dev);

	if (udfmp->s_table != NULL)
		free(udfmp->s_table, M_UDFMOUNT);

	free(udfmp, M_UDFMOUNT);

	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}
Example no. 12
0
/*
 * Mount the per-process file descriptors (/dev/fd)
 */
static int
fdesc_mount(struct mount *mp)
{
	int error = 0;
	struct fdescmount *fmp;
	struct vnode *rvp;

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof(struct fdescmount),
				M_FDESCMNT, M_WAITOK);	/* XXX */

	/*
	 * We need to initialize a few bits of our local mount point struct to
	 * avoid confusion in allocvp.
	 */
	mp->mnt_data = (qaddr_t) fmp;
	fmp->flags = 0;
	error = fdesc_allocvp(Froot, -1, FD_ROOT, mp, &rvp);
	if (error) {
		free(fmp, M_FDESCMNT);
		mp->mnt_data = 0;
		return (error);
	}
	rvp->v_type = VDIR;
	rvp->v_vflag |= VV_ROOT;
	fmp->f_root = rvp;
	VOP_UNLOCK(rvp, 0);
	/* XXX -- don't mark as local to work around fts() problems */
	/*mp->mnt_flag |= MNT_LOCAL;*/
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);

	vfs_mountedfrom(mp, "fdescfs");
	return (0);
}
Example no. 13
0
/*
 * Turns off quotas, assumes that ump->um_qflags are already checked
 * and QTF_CLOSING is set to indicate operation in progress. Fixes
 * ump->um_qflags and mp->mnt_flag after.
 */
int
quotaoff_inchange(struct thread *td, struct mount *mp, int type)
{
    struct ufsmount *ump;
    int i;
    int error;

    error = quotaoff1(td, mp, type);

    ump = VFSTOUFS(mp);
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_CLOSING;
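    /*
     * If no quota file remains open for any quota type, quotas are fully
     * disabled on this mount, so clear MNT_QUOTA.
     */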
    for (i = 0; i < MAXQUOTAS; i++)
        if (ump->um_quotas[i] != NULLVP)
            break;
    if (i == MAXQUOTAS) {
        MNT_ILOCK(mp);
        mp->mnt_flag &= ~MNT_QUOTA;
        MNT_IUNLOCK(mp);
    }
    UFS_UNLOCK(ump);
    return (error);
}
Example no. 14
0
/*
 * Q_QUOTAON - set up a quota file for a particular filesystem.
 */
int
quotaon(struct thread *td, struct mount *mp, int type, void *fname)
{
    struct ufsmount *ump;
    struct vnode *vp, **vpp;
    struct vnode *mvp;
    struct dquot *dq;
    int error, flags;
    struct nameidata nd;

    error = priv_check(td, PRIV_UFS_QUOTAON);
    if (error != 0) {
        vfs_unbusy(mp);
        return (error);
    }

    if ((mp->mnt_flag & MNT_RDONLY) != 0) {
        vfs_unbusy(mp);
        return (EROFS);
    }

    ump = VFSTOUFS(mp);
    dq = NODQUOT;

    NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, td);
    flags = FREAD | FWRITE;
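    /*
     * vn_open() may sleep, so drop the busy count on the mount while it
     * runs, keeping only a vfs_ref() reference so the mount cannot go
     * away; the mount is re-busied with MBF_NOWAIT afterwards.
     */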
    vfs_ref(mp);
    vfs_unbusy(mp);
    error = vn_open(&nd, &flags, 0, NULL);
    if (error != 0) {
        vfs_rel(mp);
        return (error);
    }
    NDFREE(&nd, NDF_ONLY_PNBUF);
    vp = nd.ni_vp;
    error = vfs_busy(mp, MBF_NOWAIT);
    vfs_rel(mp);
    if (error == 0) {
        if (vp->v_type != VREG) {
            error = EACCES;
            vfs_unbusy(mp);
        }
    }
    if (error != 0) {
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        return (error);
    }

    UFS_LOCK(ump);
    if ((ump->um_qflags[type] & (QTF_OPENING|QTF_CLOSING)) != 0) {
        UFS_UNLOCK(ump);
        VOP_UNLOCK(vp, 0);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (EALREADY);
    }
    ump->um_qflags[type] |= QTF_OPENING|QTF_CLOSING;
    UFS_UNLOCK(ump);
    if ((error = dqopen(vp, ump, type)) != 0) {
        VOP_UNLOCK(vp, 0);
        UFS_LOCK(ump);
        ump->um_qflags[type] &= ~(QTF_OPENING|QTF_CLOSING);
        UFS_UNLOCK(ump);
        (void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
        vfs_unbusy(mp);
        return (error);
    }
    VOP_UNLOCK(vp, 0);
    MNT_ILOCK(mp);
    mp->mnt_flag |= MNT_QUOTA;
    MNT_IUNLOCK(mp);

    vpp = &ump->um_quotas[type];
    if (*vpp != vp)
        quotaoff1(td, mp, type);

    /*
     * When the directory vnode containing the quota file is
     * inactivated, due to the shared lookup of the quota file
     * vput()ing the dvp, the qsyncvp() call for the containing
     * directory would try to acquire the quota lock exclusive.
     * At the same time, lookup already locked the quota vnode
     * shared.  Mark the quota vnode lock as allowing recursion
     * and automatically converting shared locks to exclusive.
     *
     * Also mark quota vnode as system.
     */
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    vp->v_vflag |= VV_SYSTEM;
    VN_LOCK_AREC(vp);
    VN_LOCK_DSHARE(vp);
    VOP_UNLOCK(vp, 0);
    *vpp = vp;
    /*
     * Save the credential of the process that turned on quotas.
     * Set up the time limits for this quota.
     */
    ump->um_cred[type] = crhold(td->td_ucred);
    ump->um_btime[type] = MAX_DQ_TIME;
    ump->um_itime[type] = MAX_IQ_TIME;
    if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
        if (dq->dq_btime > 0)
            ump->um_btime[type] = dq->dq_btime;
        if (dq->dq_itime > 0)
            ump->um_itime[type] = dq->dq_itime;
        dqrele(NULLVP, dq);
    }
    /*
     * Allow the getdq from getinoquota below to read the quota
     * from file.
     */
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_CLOSING;
    UFS_UNLOCK(ump);
    /*
     * Search vnodes associated with this mount point,
     * adding references to quota file being opened.
     * NB: only need to add dquot's for inodes being modified.
     */
again:
    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            goto again;
        }
        if (vp->v_type == VNON || vp->v_writecount == 0) {
            VOP_UNLOCK(vp, 0);
            vrele(vp);
            continue;
        }
        error = getinoquota(VTOI(vp));
        VOP_UNLOCK(vp, 0);
        vrele(vp);
        if (error) {
            MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
            break;
        }
    }

    if (error)
        quotaoff_inchange(td, mp, type);
    UFS_LOCK(ump);
    ump->um_qflags[type] &= ~QTF_OPENING;
    KASSERT((ump->um_qflags[type] & QTF_CLOSING) == 0,
            ("quotaon: leaking flags"));
    UFS_UNLOCK(ump);

    vfs_unbusy(mp);
    return (error);
}
Example no. 15
0
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
    struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	/*
	 * XXX: This routine converts from a `struct xucred'
	 * (argp->ex_anon) to a `struct ucred' (np->netc_anon).  This
	 * operation is questionable; for example, what should be done
	 * with fields like cr_uidinfo and cr_prison?  Currently, this
	 * routine does not touch them (leaves them as NULL).
	 */
	if (argp->ex_anon.cr_version != XUCRED_VERSION) {
		vfs_mount_error(mp, "ex_anon.cr_version: %d != %d",
		    argp->ex_anon.cr_version, XUCRED_VERSION);
		return (EINVAL);
	}

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED) {
			vfs_mount_error(mp,
			    "MNT_DEFEXPORTED already set for mount %p", mp);
			return (EPERM);
		}
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = crget();
		np->netc_anon->cr_uid = argp->ex_anon.cr_uid;
		crsetgroups(np->netc_anon, argp->ex_anon.cr_ngroups,
		    argp->ex_anon.cr_groups);
		np->netc_anon->cr_prison = &prison0;
		prison_hold(np->netc_anon->cr_prison);
		np->netc_numsecflavors = argp->ex_numsecflavors;
		bcopy(argp->ex_secflavors, np->netc_secflavors,
		    sizeof(np->netc_secflavors));
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_DEFEXPORTED;
		MNT_IUNLOCK(mp);
		return (0);
	}

#if MSIZE <= 256
	if (argp->ex_addrlen > MLEN) {
		vfs_mount_error(mp, "ex_addrlen %d is greater than %d",
		    argp->ex_addrlen, MLEN);
		return (EINVAL);
	}
#endif

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_family == AF_UNSPEC || saddr->sa_family > AF_MAX) {
		error = EINVAL;
		vfs_mount_error(mp, "Invalid saddr->sa_family: %d");
		goto out;
	}
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == NULL) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next) {
			KASSERT(((i == AF_INET) || (i == AF_INET6)), 
			    ("unexpected protocol in vfs_hang_addrlist"));
			if (dom->dom_family == i && dom->dom_rtattach) {
				/*
				 * XXX MRT 
				 * The INET and INET6 domains know the
				 * offset already. We don't need to send it
				 * So we just use it as a flag to say that
				 * we are or are not setting up a real routing
				 * table. Only IP and IPV6 need have this
				 * be 0 so all other protocols can stay the 
				 * same (ABI compatible).
				 */ 
				dom->dom_rtattach(
				    (void **) &nep->ne_rtable[i], 0);
				break;
			}
		}
		if ((rnh = nep->ne_rtable[i]) == NULL) {
			error = ENOBUFS;
			vfs_mount_error(mp, "%s %s %d",
			    "Unable to initialize radix node head ",
			    "for address family", i);
			goto out;
		}
	}
	RADIX_NODE_HEAD_LOCK(rnh);
	rn = (*rnh->rnh_addaddr)(saddr, smask, rnh, np->netc_rnodes);
	RADIX_NODE_HEAD_UNLOCK(rnh);
	if (rn == NULL || np != (struct netcred *)rn) {	/* already exists */
		error = EPERM;
		vfs_mount_error(mp, "Invalid radix node head, rn: %p %p",
		    rn, np);
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = crget();
	np->netc_anon->cr_uid = argp->ex_anon.cr_uid;
	crsetgroups(np->netc_anon, argp->ex_anon.cr_ngroups,
	    argp->ex_anon.cr_groups);
	np->netc_anon->cr_prison = &prison0;
	prison_hold(np->netc_anon->cr_prison);
	np->netc_numsecflavors = argp->ex_numsecflavors;
	bcopy(argp->ex_secflavors, np->netc_secflavors,
	    sizeof(np->netc_secflavors));
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}
Example no. 16
0
static int
udf_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct buf *bp = NULL;
	struct cdev *dev;
	struct anchor_vdp avdp;
	struct udf_mnt *udfmp = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct file_entry *root_fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t logical_secsize;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;
	struct g_consumer *cp;
	struct bufobj *bo;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "udf", 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto bail;

	bo = &devvp->v_bufobj;

	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	/* XXX: should be M_WAITOK */
	udfmp = malloc(sizeof(struct udf_mnt), M_UDFMOUNT,
	    M_NOWAIT | M_ZERO);
	if (udfmp == NULL) {
		printf("Cannot allocate UDF mount struct\n");
		error = ENOMEM;
		goto bail;
	}

	mp->mnt_data = udfmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(devvp->v_rdev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);
	udfmp->im_mountp = mp;
	udfmp->im_dev = dev;
	udfmp->im_devvp = devvp;
	udfmp->im_d2l = NULL;
	udfmp->im_cp = cp;
	udfmp->im_bo = bo;

#if 0
	udfmp->im_l2d = NULL;
#endif
	/*
	 * The UDF specification defines a logical sectorsize of 2048
	 * for DVD media.
	 */
	logical_secsize = 2048;

	if (((logical_secsize % cp->provider->sectorsize) != 0) ||
	    (logical_secsize < cp->provider->sectorsize)) {
		error = EINVAL;
		goto bail;
	}

	bsize = cp->provider->sectorsize;

	/* 
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * XXX Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(logical_secsize), bsize,
			   NOCRED, &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * XXX Should we care about the partition type right now?
	 * XXX What about multiple partitions?
	 */
	mvds_start = le32toh(avdp.main_vds_ex.loc);
	mvds_end = mvds_start + (le32toh(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(logical_secsize),
				   bsize, NOCRED, &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			udfmp->bsize = le32toh(lvd->lb_size);
			udfmp->bmask = udfmp->bsize - 1;
			udfmp->bshift = ffs(udfmp->bsize) - 1;
			fsd_part = le16toh(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = le32toh(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(udfmp, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = le16toh(pd->part_num);
			udfmp->part_len = le32toh(pd->part_len);
			udfmp->part_start = le32toh(pd->start_loc);
		}

		brelse(bp); 
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}


	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */
	sector = udfmp->part_start + fsd_offset;
	if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &udfmp->root_icb,
		    sizeof(struct long_ad));
	}

	brelse(bp);
	bp = NULL;

	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = le32toh(udfmp->root_icb.loc.lb_num) + udfmp->part_start;
	size = le32toh(udfmp->root_icb.len);
	if ((error = udf_readdevblks(udfmp, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	root_fentry = (struct file_entry *)bp->b_data;
	if ((error = udf_checktag(&root_fentry->tag, TAGID_FENTRY))) {
		printf("Invalid root file entry!\n");
		goto bail;
	}

	brelse(bp);
	bp = NULL;

	return 0;

bail:
	if (udfmp != NULL)
		free(udfmp, M_UDFMOUNT);
	if (bp != NULL)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	dev_rel(dev);
	return error;
}
Example no. 17
0
/* ARGSUSED */
static int
nfs_mount(struct mount *mp)
{
	struct nfs_args args = {
	    .version = NFS_ARGSVERSION,
	    .addr = NULL,
	    .addrlen = sizeof (struct sockaddr_in),
	    .sotype = SOCK_STREAM,
	    .proto = 0,
	    .fh = NULL,
	    .fhsize = 0,
	    .flags = NFSMNT_RESVPORT,
	    .wsize = NFS_WSIZE,
	    .rsize = NFS_RSIZE,
	    .readdirsize = NFS_READDIRSIZE,
	    .timeo = 10,
	    .retrans = NFS_RETRANS,
	    .maxgrouplist = NFS_MAXGRPS,
	    .readahead = NFS_DEFRAHEAD,
	    .wcommitsize = 0,			/* was: NQ_DEFLEASE */
	    .deadthresh = NFS_MAXDEADTHRESH,	/* was: NQ_DEADTHRESH */
	    .hostname = NULL,
	    /* args version 4 */
	    .acregmin = NFS_MINATTRTIMO,
	    .acregmax = NFS_MAXATTRTIMO,
	    .acdirmin = NFS_MINDIRATTRTIMO,
	    .acdirmax = NFS_MAXDIRATTRTIMO,
	};
	int error, ret, has_nfs_args_opt;
	int has_addr_opt, has_fh_opt, has_hostname_opt;
	struct sockaddr *nam;
	struct vnode *vp;
	char hst[MNAMELEN];
	size_t len;
	u_char nfh[NFSX_V3FHMAX];
	char *opt;
	int nametimeo = NFS_DEFAULT_NAMETIMEO;
	int negnametimeo = NFS_DEFAULT_NEGNAMETIMEO;

	has_nfs_args_opt = 0;
	has_addr_opt = 0;
	has_fh_opt = 0;
	has_hostname_opt = 0;

	if (vfs_filteropt(mp->mnt_optnew, nfs_opts)) {
		error = EINVAL;
		goto out;
	}

	if ((mp->mnt_flag & (MNT_ROOTFS | MNT_UPDATE)) == MNT_ROOTFS) {
		error = nfs_mountroot(mp);
		goto out;
	}

	/*
	 * The old mount_nfs program passed the struct nfs_args
	 * from userspace to kernel.  The new mount_nfs program
	 * passes string options via nmount() from userspace to kernel
	 * and we populate the struct nfs_args in the kernel.
	 */
	if (vfs_getopt(mp->mnt_optnew, "nfs_args", NULL, NULL) == 0) {
		error = vfs_copyopt(mp->mnt_optnew, "nfs_args", &args,
		    sizeof args);
		if (error)
			goto out;

		if (args.version != NFS_ARGSVERSION) {
			error = EPROGMISMATCH;
			goto out;
		}
		has_nfs_args_opt = 1;
	}

	if (vfs_getopt(mp->mnt_optnew, "dumbtimer", NULL, NULL) == 0)
		args.flags |= NFSMNT_DUMBTIMR;
	if (vfs_getopt(mp->mnt_optnew, "noconn", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCONN;
	if (vfs_getopt(mp->mnt_optnew, "conn", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCONN;
	if (vfs_getopt(mp->mnt_optnew, "nolockd", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOLOCKD;
	if (vfs_getopt(mp->mnt_optnew, "lockd", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_NOLOCKD;
	if (vfs_getopt(mp->mnt_optnew, "intr", NULL, NULL) == 0)
		args.flags |= NFSMNT_INT;
	if (vfs_getopt(mp->mnt_optnew, "rdirplus", NULL, NULL) == 0)
		args.flags |= NFSMNT_RDIRPLUS;
	if (vfs_getopt(mp->mnt_optnew, "resvport", NULL, NULL) == 0)
		args.flags |= NFSMNT_RESVPORT;
	if (vfs_getopt(mp->mnt_optnew, "noresvport", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_RESVPORT;
	if (vfs_getopt(mp->mnt_optnew, "soft", NULL, NULL) == 0)
		args.flags |= NFSMNT_SOFT;
	if (vfs_getopt(mp->mnt_optnew, "hard", NULL, NULL) == 0)
		args.flags &= ~NFSMNT_SOFT;
	if (vfs_getopt(mp->mnt_optnew, "mntudp", NULL, NULL) == 0)
		args.sotype = SOCK_DGRAM;
	if (vfs_getopt(mp->mnt_optnew, "udp", NULL, NULL) == 0)
		args.sotype = SOCK_DGRAM;
	if (vfs_getopt(mp->mnt_optnew, "tcp", NULL, NULL) == 0)
		args.sotype = SOCK_STREAM;
	if (vfs_getopt(mp->mnt_optnew, "nfsv3", NULL, NULL) == 0)
		args.flags |= NFSMNT_NFSV3;
	if (vfs_getopt(mp->mnt_optnew, "nocto", NULL, NULL) == 0)
		args.flags |= NFSMNT_NOCTO;
	if (vfs_getopt(mp->mnt_optnew, "readdirsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal readdirsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.readdirsize);
		if (ret != 1 || args.readdirsize <= 0) {
			vfs_mount_error(mp, "illegal readdirsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_READDIRSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "readahead", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal readahead");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.readahead);
		if (ret != 1 || args.readahead <= 0) {
			vfs_mount_error(mp, "illegal readahead: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_READAHEAD;
	}
	if (vfs_getopt(mp->mnt_optnew, "wsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal wsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.wsize);
		if (ret != 1 || args.wsize <= 0) {
			vfs_mount_error(mp, "illegal wsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_WSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "rsize", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal rsize");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.rsize);
		if (ret != 1 || args.rsize <= 0) {
			vfs_mount_error(mp, "illegal wsize: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_RSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "retrans", (void **)&opt, NULL) == 0) {
		if (opt == NULL) { 
			vfs_mount_error(mp, "illegal retrans");
			error = EINVAL;
			goto out;
		}
		ret = sscanf(opt, "%d", &args.retrans);
		if (ret != 1 || args.retrans <= 0) {
			vfs_mount_error(mp, "illegal retrans: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_RETRANS;
	}
	if (vfs_getopt(mp->mnt_optnew, "acregmin", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acregmin);
		if (ret != 1 || args.acregmin < 0) {
			vfs_mount_error(mp, "illegal acregmin: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACREGMIN;
	}
	if (vfs_getopt(mp->mnt_optnew, "acregmax", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acregmax);
		if (ret != 1 || args.acregmax < 0) {
			vfs_mount_error(mp, "illegal acregmax: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACREGMAX;
	}
	if (vfs_getopt(mp->mnt_optnew, "acdirmin", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acdirmin);
		if (ret != 1 || args.acdirmin < 0) {
			vfs_mount_error(mp, "illegal acdirmin: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACDIRMIN;
	}
	if (vfs_getopt(mp->mnt_optnew, "acdirmax", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.acdirmax);
		if (ret != 1 || args.acdirmax < 0) {
			vfs_mount_error(mp, "illegal acdirmax: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_ACDIRMAX;
	}
	if (vfs_getopt(mp->mnt_optnew, "wcommitsize", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.wcommitsize);
		if (ret != 1 || args.wcommitsize < 0) {
			vfs_mount_error(mp, "illegal wcommitsize: %s", opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_WCOMMITSIZE;
	}
	if (vfs_getopt(mp->mnt_optnew, "deadthresh", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.deadthresh);
		if (ret != 1 || args.deadthresh <= 0) {
			vfs_mount_error(mp, "illegal deadthresh: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_DEADTHRESH;
	}
	if (vfs_getopt(mp->mnt_optnew, "timeout", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.timeo);
		if (ret != 1 || args.timeo <= 0) {
			vfs_mount_error(mp, "illegal timeout: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_TIMEO;
	}
	if (vfs_getopt(mp->mnt_optnew, "maxgroups", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &args.maxgrouplist);
		if (ret != 1 || args.maxgrouplist <= 0) {
			vfs_mount_error(mp, "illegal maxgroups: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
		args.flags |= NFSMNT_MAXGRPS;
	}
	if (vfs_getopt(mp->mnt_optnew, "nametimeo", (void **)&opt, NULL) == 0) {
		ret = sscanf(opt, "%d", &nametimeo);
		if (ret != 1 || nametimeo < 0) {
			vfs_mount_error(mp, "illegal nametimeo: %s", opt);
			error = EINVAL;
			goto out;
		}
	}
	if (vfs_getopt(mp->mnt_optnew, "negnametimeo", (void **)&opt, NULL)
	    == 0) {
		ret = sscanf(opt, "%d", &negnametimeo);
		if (ret != 1 || negnametimeo < 0) {
			vfs_mount_error(mp, "illegal negnametimeo: %s",
			    opt);
			error = EINVAL;
			goto out;
		}
	}
	if (vfs_getopt(mp->mnt_optnew, "addr", (void **)&args.addr,
		&args.addrlen) == 0) {
		has_addr_opt = 1;
		if (args.addrlen > SOCK_MAXADDRLEN) {
			error = ENAMETOOLONG;
			goto out;
		}
		nam = malloc(args.addrlen, M_SONAME,
		    M_WAITOK);
		bcopy(args.addr, nam, args.addrlen);
		nam->sa_len = args.addrlen;
	}
	if (vfs_getopt(mp->mnt_optnew, "fh", (void **)&args.fh,
		&args.fhsize) == 0) {
		has_fh_opt = 1;
	}
	if (vfs_getopt(mp->mnt_optnew, "hostname", (void **)&args.hostname,
		NULL) == 0) {
		has_hostname_opt = 1;
	}
	if (args.hostname == NULL) {
		vfs_mount_error(mp, "Invalid hostname");
		error = EINVAL;
		goto out;
	}
	if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX) {
		vfs_mount_error(mp, "Bad file handle");
		error = EINVAL;
		goto out;
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		struct nfsmount *nmp = VFSTONFS(mp);

		if (nmp == NULL) {
			error = EIO;
			goto out;
		}

		/*
		 * If a change from TCP->UDP is done and there are thread(s)
		 * that have I/O RPC(s) in progress with a transfer size
		 * greater than NFS_MAXDGRAMDATA, those thread(s) will be
		 * hung, retrying the RPC(s) forever. Usually these threads
		 * will be seen doing an uninterruptible sleep on wait channel
		 * "newnfsreq" (truncated to "newnfsre" by procstat).
		 */
		if (args.sotype == SOCK_DGRAM && nmp->nm_sotype == SOCK_STREAM)
			tprintf(curthread->td_proc, LOG_WARNING,
	"Warning: mount -u that changes TCP->UDP can result in hung threads\n");

		/*
		 * When doing an update, we can't change from or to
		 * v3, switch lockd strategies or change cookie translation
		 */
		args.flags = (args.flags &
		    ~(NFSMNT_NFSV3 | NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/)) |
		    (nmp->nm_flag &
			(NFSMNT_NFSV3 | NFSMNT_NOLOCKD /*|NFSMNT_XLATECOOKIE*/));
		nfs_decode_args(mp, nmp, &args, NULL);
		goto out;
	}

	/*
	 * Make the nfs_ip_paranoia sysctl serve as the default connection
	 * or no-connection mode for those protocols that support 
	 * no-connection mode (the flag will be cleared later for protocols
	 * that do not support no-connection mode).  This will allow a client
	 * to receive replies from a different IP than the request was
	 * sent to.  Note: default value for nfs_ip_paranoia is 1 (paranoid),
	 * not 0.
	 */
	if (nfs_ip_paranoia == 0)
		args.flags |= NFSMNT_NOCONN;

	if (has_nfs_args_opt) {
		/*
		 * In the 'nfs_args' case, the pointers in the args
		 * structure are in userland - we copy them in here.
		 */
		if (!has_fh_opt) {
			error = copyin((caddr_t)args.fh, (caddr_t)nfh,
			    args.fhsize);
			if (error) {
				goto out;
			}
			args.fh = nfh;
		}
		if (!has_hostname_opt) {
			error = copyinstr(args.hostname, hst, MNAMELEN-1, &len);
			if (error) {
				goto out;
			}
			bzero(&hst[len], MNAMELEN - len);
			args.hostname = hst;
		}
		if (!has_addr_opt) {
			/* sockargs() call must be after above copyin() calls */
			error = getsockaddr(&nam, (caddr_t)args.addr,
			    args.addrlen);
			if (error) {
				goto out;
			}
		}
	} else if (has_addr_opt == 0) {
		vfs_mount_error(mp, "No server address");
		error = EINVAL;
		goto out;
	}
	error = mountnfs(&args, mp, nam, args.hostname, &vp,
	    curthread->td_ucred, nametimeo, negnametimeo);
out:
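	/* On success, allow shared-lock lookups on this mount. */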
	if (!error) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
		MNT_IUNLOCK(mp);
	}
	return (error);
}


/*
 * VFS Operations.
 *
 * mount system call
 * It seems a bit dumb to copyinstr() the host and path here and then
 * bcopy() them in mountnfs(), but I wanted to detect errors before
 * doing the sockargs() call because sockargs() allocates an mbuf and
 * an error after that means that I have to release the mbuf.
 */
/* ARGSUSED */
static int
nfs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	int error;
	struct nfs_args args;

	error = copyin(data, &args, sizeof (struct nfs_args));
	if (error)
		return error;

	ma = mount_arg(ma, "nfs_args", &args, sizeof args);

	error = kernel_mount(ma, flags);
	return (error);
}
Example no. 18
0
/*
 * Unmount the filesystem described by mp.
 */
static int
msdosfs_unmount(struct mount *mp, int mntflags)
{
	struct msdosfsmount *pmp;
	int error, flags;

	error = flags = 0;
	pmp = VFSTOMSDOSFS(mp);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0)
		error = msdosfs_sync(mp, MNT_WAIT);
	if ((mntflags & MNT_FORCE) != 0)
		flags |= FORCECLOSE;
	else if (error != 0)
		return (error);
	error = vflush(mp, 0, flags, curthread);
	if (error != 0 && error != ENXIO)
		return (error);
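	/*
	 * On a read-write unmount, mark the volume clean.  If that fails
	 * for any reason other than a lost device (ENXIO), restore the
	 * dirty flag and abort the unmount.
	 */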
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0) {
		error = markvoldirty(pmp, 0);
		if (error && error != ENXIO) {
			(void)markvoldirty(pmp, 1);
			return (error);
		}
	}
	if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) {
		if (pmp->pm_w2u)
			msdosfs_iconv->close(pmp->pm_w2u);
		if (pmp->pm_u2w)
			msdosfs_iconv->close(pmp->pm_u2w);
		if (pmp->pm_d2u)
			msdosfs_iconv->close(pmp->pm_d2u);
		if (pmp->pm_u2d)
			msdosfs_iconv->close(pmp->pm_u2d);
	}

#ifdef MSDOSFS_DEBUG
	{
		struct vnode *vp = pmp->pm_devvp;
		struct bufobj *bo;

		bo = &vp->v_bufobj;
		BO_LOCK(bo);
		VI_LOCK(vp);
		vn_printf(vp,
		    "msdosfs_umount(): just before calling VOP_CLOSE()\n");
		printf("freef %p, freeb %p, mount %p\n",
		    TAILQ_NEXT(vp, v_actfreelist), vp->v_actfreelist.tqe_prev,
		    vp->v_mount);
		printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
		    TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd),
		    TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd),
		    vp->v_bufobj.bo_numoutput, vp->v_type);
		VI_UNLOCK(vp);
		BO_UNLOCK(bo);
	}
#endif
	DROP_GIANT();
	if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
		pmp->pm_devvp->v_rdev->si_mountpt = NULL;
	g_topology_lock();
	g_vfs_close(pmp->pm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(pmp->pm_devvp);
	dev_rel(pmp->pm_dev);
	free(pmp->pm_inusemap, M_MSDOSFSFAT);
	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_free(mp);
	lockdestroy(&pmp->pm_fatlock);
	free(pmp, M_MSDOSFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example no. 19
0
static int
udf_mount(struct mount *mp)
{
	struct vnode *devvp;	/* vnode of the mount device */
	struct thread *td;
	struct udf_mnt *imp = NULL;
	struct vfsoptlist *opts;
	char *fspec, *cs_disk, *cs_local;
	int error, len, *udf_flags;
	struct nameidata nd, *ndp = &nd;

	td = curthread;
	opts = mp->mnt_optnew;

	/*
	 * Unconditionally mount as read-only.
	 */
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);

	/*
	 * No root filesystem support.  Probably not a big deal, since the
	 * bootloader doesn't understand UDF.
	 */
	if (mp->mnt_flag & MNT_ROOTFS)
		return (ENOTSUP);

	fspec = NULL;
	error = vfs_getopt(opts, "from", (void **)&fspec, &len);
	if (!error && fspec[len - 1] != '\0')
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE) {
		return (0);
	}

	/* Check that the mount device exists */
	if (fspec == NULL)
		return (EINVAL);
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(ndp)))
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;

	if (vn_isdisk(devvp, &error) == 0) {
		vput(devvp);
		return (error);
	}

	/* Check the access rights on the mount device */
	error = VOP_ACCESS(devvp, VREAD, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((error = udf_mountfs(devvp, mp))) {
		vrele(devvp);
		return (error);
	}

	imp = VFSTOUDFFS(mp);

	udf_flags = NULL;
	error = vfs_getopt(opts, "flags", (void **)&udf_flags, &len);
	if (error || len != sizeof(int))
		return (EINVAL);
	imp->im_flags = *udf_flags;

	if (imp->im_flags & UDFMNT_KICONV && udf_iconv) {
		cs_disk = NULL;
		error = vfs_getopt(opts, "cs_disk", (void **)&cs_disk, &len);
		if (!error && cs_disk[len - 1] != '\0')
			return (EINVAL);
		cs_local = NULL;
		error = vfs_getopt(opts, "cs_local", (void **)&cs_local, &len);
		if (!error && cs_local[len - 1] != '\0')
			return (EINVAL);
		udf_iconv->open(cs_local, cs_disk, &imp->im_d2l);
#if 0
		udf_iconv->open(cs_disk, cs_local, &imp->im_l2d);
#endif
	}

	vfs_mountedfrom(mp, fspec);
	return 0;
}
Example no. 20
0
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 */
static int
msdosfs_mount(struct mount *mp)
{
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct thread *td;
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	struct nameidata ndp;
	int error, flags;
	accmode_t accmode;
	char *from;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, msdosfs_opts))
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) {
			/*
			 * Forbid export requests if filesystem has
			 * MSDOSFS_LARGEFS flag set.
			 */
			if ((pmp->pm_flags & MSDOSFS_LARGEFS) != 0) {
				vfs_mount_error(mp,
				    "MSDOSFS_LARGEFS flag set, cannot export");
				return (EOPNOTSUPP);
			}
		}
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, 0, flags, td);
			if (error)
				return (error);

			/*
			 * Now the volume is clean.  Mark it so while the
			 * device is still rw.
			 */
			error = markvoldirty(pmp, 0);
			if (error) {
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/* Downgrade the device from rw to ro. */
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/*
			 * Backing out after an error was painful in the
			 * above.  Now we are committed to succeeding.
			 */
			pmp->pm_fmod = 0;
			pmp->pm_flags |= MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
		} else if ((pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			pmp->pm_fmod = 1;
			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);

			/* Now that the volume is modifiable, mark it dirty. */
			error = markvoldirty(pmp, 1);
			if (error)
				return (error); 
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (vfs_getopt(mp->mnt_optnew, "from", (void **)&from, NULL))
		return (EINVAL);
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, from, td);
	error = namei(&ndp);
	if (error)
		return (error);
	devvp = ndp.ni_vp;
	NDFREE(&ndp, NDF_ONLY_PNBUF);

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = mountmsdosfs(devvp, mp);
#ifdef MSDOSFS_DEBUG		/* only needed for the printf below */
		pmp = VFSTOMSDOSFS(mp);
#endif
	} else {
		vput(devvp);
		if (devvp != pmp->pm_devvp)
			return (EINVAL);	/* XXX needs translation */
	}
	if (error) {
		vrele(devvp);
		return (error);
	}

	error = update_mp(mp, td);
	if (error) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			msdosfs_unmount(mp, MNT_FORCE);
		return error;
	}

	if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
		devvp->v_rdev->si_mountpt = mp;
	vfs_mountedfrom(mp, from);
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
#endif
	return (0);
}
Example no. 21
0
static int
mountmsdosfs(struct vnode *devvp, struct mount *mp)
{
	struct msdosfsmount *pmp;
	struct buf *bp;
	struct cdev *dev;
	union bootsector *bsp;
	struct byte_bpb33 *b33;
	struct byte_bpb50 *b50;
	struct byte_bpb710 *b710;
	u_int8_t SecPerClust;
	u_long clusters;
	int ronly, error;
	struct g_consumer *cp;
	struct bufobj *bo;

	bp = NULL;		/* This and pmp both used in error_exit. */
	pmp = NULL;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "msdosfs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto error_exit;

	bo = &devvp->v_bufobj;

	/*
	 * Read the boot sector of the filesystem, and then check the
	 * boot signature.  If not a dos boot sector then error out.
	 *
	 * NOTE: 8192 is a magic size that works for ffs.
	 */
	error = bread(devvp, 0, 8192, NOCRED, &bp);
	if (error)
		goto error_exit;
	bp->b_flags |= B_AGE;
	bsp = (union bootsector *)bp->b_data;
	b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
	b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
	b710 = (struct byte_bpb710 *)bsp->bs710.bsBPB;

#ifndef MSDOSFS_NOCHECKSIG
	if (bsp->bs50.bsBootSectSig0 != BOOTSIG0
	    || bsp->bs50.bsBootSectSig1 != BOOTSIG1) {
		error = EINVAL;
		goto error_exit;
	}
#endif

	pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
	pmp->pm_mountp = mp;
	pmp->pm_cp = cp;
	pmp->pm_bo = bo;

	lockinit(&pmp->pm_fatlock, 0, msdosfs_lock_msg, 0, 0);

	/*
	 * Initialize ownerships and permissions, since nothing else will
	 * initialize them iff we are mounting root.
	 */
	pmp->pm_uid = UID_ROOT;
	pmp->pm_gid = GID_WHEEL;
	pmp->pm_mask = pmp->pm_dirmask = S_IXUSR | S_IXGRP | S_IXOTH |
	    S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR;

	/*
	 * Experimental support for large MS-DOS filesystems.
	 * WARNING: This uses at least 32 bytes of kernel memory (which is not
	 * reclaimed until the FS is unmounted) for each file on disk to map
	 * between the 32-bit inode numbers used by VFS and the 64-bit
	 * pseudo-inode numbers used internally by msdosfs. This is only
	 * safe to use in certain controlled situations (e.g. read-only FS
	 * with less than 1 million files).
	 * Since the mappings do not persist across unmounts (or reboots), these
	 * filesystems are not suitable for exporting through NFS, or any other
	 * application that requires fixed inode numbers.
	 */
	vfs_flagopt(mp->mnt_optnew, "large", &pmp->pm_flags, MSDOSFS_LARGEFS);

	/*
	 * Compute several useful quantities from the bpb in the
	 * bootsector.  Copy in the dos 5 variant of the bpb then fix up
	 * the fields that are different between dos 5 and dos 3.3.
	 */
	SecPerClust = b50->bpbSecPerClust;
	pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
	if (pmp->pm_BytesPerSec < DEV_BSIZE) {
		error = EINVAL;
		goto error_exit;
	}
	pmp->pm_ResSectors = getushort(b50->bpbResSectors);
	pmp->pm_FATs = b50->bpbFATs;
	pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
	pmp->pm_Sectors = getushort(b50->bpbSectors);
	pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
	pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
	pmp->pm_Heads = getushort(b50->bpbHeads);
	pmp->pm_Media = b50->bpbMedia;

	/* calculate the ratio of sector size to DEV_BSIZE */
	pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;

	/*
	 * We don't check pm_Heads nor pm_SecPerTrack, because
	 * these may not be set for EFI file systems. We don't
	 * use these anyway, so we're unaffected if they are
	 * invalid.
	 */
	if (!pmp->pm_BytesPerSec || !SecPerClust) {
		error = EINVAL;
		goto error_exit;
	}

	if (pmp->pm_Sectors == 0) {
		pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
		pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
	} else {
		pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
		pmp->pm_HugeSectors = pmp->pm_Sectors;
	}
	if (!(pmp->pm_flags & MSDOSFS_LARGEFS)) {
		if (pmp->pm_HugeSectors > 0xffffffff /
		    (pmp->pm_BytesPerSec / sizeof(struct direntry)) + 1) {
			/*
			 * We cannot deal currently with this size of disk
			 * due to fileid limitations (see msdosfs_getattr and
			 * msdosfs_readdir)
			 */
			error = EINVAL;
			vfs_mount_error(mp,
			    "Disk too big, try '-o large' mount option");
			goto error_exit;
		}
	}

	if (pmp->pm_RootDirEnts == 0) {
		if (pmp->pm_FATsecs
		    || getushort(b710->bpbFSVers)) {
			error = EINVAL;
#ifdef MSDOSFS_DEBUG
			printf("mountmsdosfs(): bad FAT32 filesystem\n");
#endif
			goto error_exit;
		}
		pmp->pm_fatmask = FAT32_MASK;
		pmp->pm_fatmult = 4;
		pmp->pm_fatdiv = 1;
		pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
		if (getushort(b710->bpbExtFlags) & FATMIRROR)
			pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
		else
			pmp->pm_flags |= MSDOSFS_FATMIRROR;
	} else
		pmp->pm_flags |= MSDOSFS_FATMIRROR;

	/*
	 * Check a few values (could do some more):
	 * - logical sector size: power of 2, >= block size
	 * - sectors per cluster: power of 2, >= 1
	 * - number of sectors:   >= 1, <= size of partition
	 * - number of FAT sectors: >= 1
	 */
	if ( (SecPerClust == 0)
	  || (SecPerClust & (SecPerClust - 1))
	  || (pmp->pm_BytesPerSec < DEV_BSIZE)
	  || (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1))
	  || (pmp->pm_HugeSectors == 0)
	  || (pmp->pm_FATsecs == 0)
	  || (SecPerClust * pmp->pm_BlkPerSec > MAXBSIZE / DEV_BSIZE)
	) {
		error = EINVAL;
		goto error_exit;
	}

	/* Convert the sector counts into units of DEV_BSIZE blocks. */
	pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
	pmp->pm_HiddenSects *= pmp->pm_BlkPerSec;	/* XXX not used? */
	pmp->pm_FATsecs     *= pmp->pm_BlkPerSec;
	SecPerClust         *= pmp->pm_BlkPerSec;

	pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;

	if (FAT32(pmp)) {
		pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
		pmp->pm_firstcluster = pmp->pm_fatblk
			+ (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_fsinfo = getushort(b710->bpbFSInfo) * pmp->pm_BlkPerSec;
	} else {
		pmp->pm_rootdirblk = pmp->pm_fatblk +
			(pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_rootdirsize = howmany(pmp->pm_RootDirEnts *
			sizeof(struct direntry), DEV_BSIZE); /* in blocks */
		pmp->pm_firstcluster = pmp->pm_rootdirblk + pmp->pm_rootdirsize;
	}

	pmp->pm_maxcluster = (pmp->pm_HugeSectors - pmp->pm_firstcluster) /
	    SecPerClust + 1;
	pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE;	/* XXX not used? */

	if (pmp->pm_fatmask == 0) {
		if (pmp->pm_maxcluster
		    <= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
			/*
			 * This will usually be a floppy disk. This size makes
			 * sure that one fat entry will not be split across
			 * multiple blocks.
			 */
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	}

	clusters = (pmp->pm_fatsize / pmp->pm_fatmult) * pmp->pm_fatdiv;
	if (pmp->pm_maxcluster >= clusters) {
#ifdef MSDOSFS_DEBUG
		printf("Warning: number of clusters (%ld) exceeds FAT "
		    "capacity (%ld)\n", pmp->pm_maxcluster + 1, clusters);
#endif
		pmp->pm_maxcluster = clusters - 1;
	}

	if (FAT12(pmp))
		pmp->pm_fatblocksize = 3 * 512;
	else
		pmp->pm_fatblocksize = PAGE_SIZE;
	pmp->pm_fatblocksize = roundup(pmp->pm_fatblocksize,
	    pmp->pm_BytesPerSec);
	pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
	pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;

	/*
	 * Compute mask and shift value for isolating cluster relative byte
	 * offsets and cluster numbers from a file offset.
	 */
	pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
	pmp->pm_crbomask = pmp->pm_bpcluster - 1;
	pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;

	/*
	 * Check for valid cluster size
	 * must be a power of 2
	 */
	if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * Release the bootsector buffer.
	 */
	brelse(bp);
	bp = NULL;

	/*
	 * Check the fsinfo sector if we have one.  Silently fix up our
	 * in-core copy of fp->fsinxtfree if it is unknown (0xffffffff)
	 * or too large.  Ignore fp->fsinfree for now, since we need to
	 * read the entire FAT anyway to fill the inuse map.
	 */
	if (pmp->pm_fsinfo) {
		struct fsinfo *fp;

		if ((error = bread(devvp, pmp->pm_fsinfo, pmp->pm_BytesPerSec,
		    NOCRED, &bp)) != 0)
			goto error_exit;
		fp = (struct fsinfo *)bp->b_data;
		if (!bcmp(fp->fsisig1, "RRaA", 4)
		    && !bcmp(fp->fsisig2, "rrAa", 4)
		    && !bcmp(fp->fsisig3, "\0\0\125\252", 4)) {
			pmp->pm_nxtfree = getulong(fp->fsinxtfree);
			if (pmp->pm_nxtfree > pmp->pm_maxcluster)
				pmp->pm_nxtfree = CLUST_FIRST;
		} else
			pmp->pm_fsinfo = 0;
		brelse(bp);
		bp = NULL;
	}

	/*
	 * Finish initializing pmp->pm_nxtfree (just in case the first few
	 * sectors aren't properly reserved in the FAT).  This completes
	 * the fixup for fp->fsinxtfree, and fixes up the zero-initialized
	 * value if there is no fsinfo.  We will use pmp->pm_nxtfree
	 * internally even if there is no fsinfo.
	 */
	if (pmp->pm_nxtfree < CLUST_FIRST)
		pmp->pm_nxtfree = CLUST_FIRST;

	/*
	 * Allocate memory for the bitmap of allocated clusters, and then
	 * fill it in.
	 */
	pmp->pm_inusemap = malloc(howmany(pmp->pm_maxcluster + 1, N_INUSEBITS)
				  * sizeof(*pmp->pm_inusemap),
				  M_MSDOSFSFAT, M_WAITOK);

	/*
	 * fillinusemap() needs pm_devvp.
	 */
	pmp->pm_devvp = devvp;
	pmp->pm_dev = dev;

	/*
	 * Have the inuse map filled in.
	 */
	MSDOSFS_LOCK_MP(pmp);
	error = fillinusemap(pmp);
	MSDOSFS_UNLOCK_MP(pmp);
	if (error != 0)
		goto error_exit;

	/*
	 * If they want fat updates to be synchronous then let them suffer
	 * the performance degradation in exchange for the on disk copy of
	 * the fat being correct just about all the time.  I suppose this
	 * would be a good thing to turn on if the kernel is still flakey.
	 */
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;

	/*
	 * Finish up.
	 */
	if (ronly)
		pmp->pm_flags |= MSDOSFSMNT_RONLY;
	else {
		if ((error = markvoldirty(pmp, 1)) != 0) {
			(void)markvoldirty(pmp, 0);
			goto error_exit;
		}
		pmp->pm_fmod = 1;
	}
	mp->mnt_data =  pmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);

	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_init(mp);

	return 0;

error_exit:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (pmp) {
		lockdestroy(&pmp->pm_fatlock);
		if (pmp->pm_inusemap)
			free(pmp->pm_inusemap, M_MSDOSFSFAT);
		free(pmp, M_MSDOSFSMNT);
		mp->mnt_data = NULL;
	}
	dev_rel(dev);
	return (error);
}
Esempio n. 22
0
/*
 * Unmount system call
 */
static int
reiserfs_unmount(struct mount *mp, int mntflags)
{
	int error, flags = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;

	reiserfs_log(LOG_DEBUG, "get private data\n");
	rmp = VFSTOREISERFS(mp);
	sbi = rmp->rm_reiserfs;

	/* Flags handling */
	reiserfs_log(LOG_DEBUG, "handle mntflags\n");
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Flush files -> vflush */
	reiserfs_log(LOG_DEBUG, "flush vnodes\n");
	if ((error = vflush(mp, 0, flags, curthread)))
		return (error);

	/* XXX Super block update */

	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			reiserfs_log(LOG_DEBUG,
			    "release bitmap buffers (total: %d)\n",
			    SB_BMAP_NR(sbi));
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (SB_AP_BITMAP(sbi)[i].bp_data) {
					free(SB_AP_BITMAP(sbi)[i].bp_data,
					    M_REISERFSMNT);
					SB_AP_BITMAP(sbi)[i].bp_data = NULL;
				}
			}

			reiserfs_log(LOG_DEBUG, "free bitmaps structure\n");
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
			SB_AP_BITMAP(sbi) = NULL;
		}

		if (sbi->s_rs) {
			reiserfs_log(LOG_DEBUG, "free super block data\n");
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	reiserfs_log(LOG_DEBUG, "close device\n");
#if defined(si_mountpoint)
	rmp->rm_devvp->v_rdev->si_mountpoint = NULL;
#endif

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(rmp->rm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(rmp->rm_devvp);
	dev_rel(rmp->rm_dev);

	if (sbi) {
		reiserfs_log(LOG_DEBUG, "free sbi\n");
		free(sbi, M_REISERFSMNT);
		sbi = rmp->rm_reiserfs = NULL;
	}
	if (rmp) {
		reiserfs_log(LOG_DEBUG, "free rmp\n");
		free(rmp, M_REISERFSMNT);
		rmp = NULL;
	}

	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	reiserfs_log(LOG_DEBUG, "done\n");
	return (error);
}
Esempio n. 23
0
static int
fuse_vfsop_unmount(struct mount *mp, int mntflags)
{
	int err = 0;
	int flags = 0;

	struct cdev *fdev;
	struct fuse_data *data;
	struct fuse_dispatcher fdi;
	struct thread *td = curthread;

	fuse_trace_printf_vfsop();

	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
	data = fuse_get_mpdata(mp);
	if (!data) {
		panic("no private data for mount point?");
	}
	/* There is 1 extra root vnode reference (mp->mnt_data). */
	FUSE_LOCK();
	if (data->vroot != NULL) {
		struct vnode *vroot = data->vroot;

		data->vroot = NULL;
		FUSE_UNLOCK();
		vrele(vroot);
	} else
		FUSE_UNLOCK();
	err = vflush(mp, 0, flags, td);
	if (err) {
		debug_printf("vflush failed");
		return err;
	}
	if (fdata_get_dead(data)) {
		goto alreadydead;
	}
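	/*
	 * Send FUSE_DESTROY so the daemon knows the filesystem is going
	 * away, then mark the connection data dead so no further requests
	 * are queued.
	 */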
	fdisp_init(&fdi, 0);
	fdisp_make(&fdi, FUSE_DESTROY, mp, 0, td, NULL);

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	fdata_set_dead(data);

alreadydead:
	FUSE_LOCK();
	data->mp = NULL;
	fdev = data->fdev;
	fdata_trydestroy(data);
	FUSE_UNLOCK();

	MNT_ILOCK(mp);
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	dev_rel(fdev);

	return 0;
}
Esempio n. 24
0
/*
 * Common code for mount and mountroot
 */ 
static int
reiserfs_mountfs(struct vnode *devvp, struct mount *mp, struct thread *td)
{
	int error, old_format = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;
	struct reiserfs_super_block *rs;
	struct cdev *dev;

	struct g_consumer *cp;
	struct bufobj *bo;

	//ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "reiserfs", /* read-only */ 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error) {
		dev_rel(dev);
		return (error);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;

	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	rmp = NULL;
	sbi = NULL;

	/* rmp contains any information about this specific mount */
	rmp = malloc(sizeof *rmp, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!rmp) {
		error = (ENOMEM);
		goto out;
	}
	sbi = malloc(sizeof *sbi, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!sbi) {
		error = (ENOMEM);
		goto out;
	}
	rmp->rm_reiserfs = sbi;
	rmp->rm_mountp   = mp;
	rmp->rm_devvp    = devvp;
	rmp->rm_dev      = dev;
	rmp->rm_bo       = &devvp->v_bufobj;
	rmp->rm_cp       = cp;

	/* Set default values for options: non-aggressive tails */
	REISERFS_SB(sbi)->s_mount_opt = (1 << REISERFS_SMALLTAIL);
	REISERFS_SB(sbi)->s_rd_only   = 1;
	REISERFS_SB(sbi)->s_devvp     = devvp;

	/* Read the super block */
	if ((error = read_super_block(rmp, REISERFS_OLD_DISK_OFFSET)) == 0) {
		/* The read process succeeded, it's an old format */
		old_format = 1;
	} else if ((error = read_super_block(rmp, REISERFS_DISK_OFFSET)) != 0) {
		reiserfs_log(LOG_ERR, "can not find a ReiserFS filesystem\n");
		goto out;
	}

	rs = SB_DISK_SUPER_BLOCK(sbi);

	/*
	 * Let's do basic sanity check to verify that underlying device is
	 * not smaller than the filesystem. If the check fails then abort and
	 * scream, because bad stuff will happen otherwise.
	 */
#if 0
	if (s->s_bdev && s->s_bdev->bd_inode &&
	    i_size_read(s->s_bdev->bd_inode) <
	    sb_block_count(rs) * sb_blocksize(rs)) {
		reiserfs_log(LOG_ERR,
		    "reiserfs: filesystem cannot be mounted because it is "
		    "bigger than the device.\n");
		reiserfs_log(LOG_ERR, "reiserfs: you may need to run fsck "
		    "rr may be you forgot to reboot after fdisk when it "
		    "told you to.\n");
		goto out;
	}
#endif

	/*
	 * XXX This is from the original Linux code, but why assign two
	 * values to the same variable?
	 */
	sbi->s_mount_state = SB_REISERFS_STATE(sbi);
	sbi->s_mount_state = REISERFS_VALID_FS;

	if ((error = (old_format ?
	    read_old_bitmaps(rmp) : read_bitmaps(rmp)))) {
		reiserfs_log(LOG_ERR, "unable to read bitmap\n");
		goto out;
	}

	/* Make data=ordered the default */
	if (!reiserfs_data_log(sbi) && !reiserfs_data_ordered(sbi) &&
	    !reiserfs_data_writeback(sbi)) {
		REISERFS_SB(sbi)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
	}

	if (reiserfs_data_log(sbi)) {
		reiserfs_log(LOG_INFO, "using journaled data mode\n");
	} else if (reiserfs_data_ordered(sbi)) {
		reiserfs_log(LOG_INFO, "using ordered data mode\n");
	} else {
		reiserfs_log(LOG_INFO, "using writeback data mode\n");
	}

	/* TODO Not yet supported */
#if 0
	if(journal_init(sbi, jdev_name, old_format, commit_max_age)) {
		reiserfs_log(LOG_ERR, "unable to initialize journal space\n");
		goto out;
	} else {
		jinit_done = 1 ; /* once this is set, journal_release must
				    be called if we error out of the mount */
	}

	if (reread_meta_blocks(sbi)) {
		reiserfs_log(LOG_ERR,
		    "unable to reread meta blocks after journal init\n");
		goto out;
	}
#endif

	/* Define and initialize hash function */
	sbi->s_hash_function = hash_function(rmp);

	if (sbi->s_hash_function == NULL) {
		reiserfs_log(LOG_ERR, "couldn't determined hash function\n");
		error = (EINVAL);
		goto out;
	}

	if (is_reiserfs_3_5(rs) ||
	    (is_reiserfs_jr(rs) && SB_VERSION(sbi) == REISERFS_VERSION_1))
		bit_set(&(sbi->s_properties), REISERFS_3_5);
	else
		bit_set(&(sbi->s_properties), REISERFS_3_6);

	mp->mnt_data = rmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
#if defined(si_mountpoint)
	devvp->v_rdev->si_mountpoint = mp;
#endif

	return (0);

out:
	reiserfs_log(LOG_INFO, "*** error during mount ***\n");
	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (!SB_AP_BITMAP(sbi)[i].bp_data)
					break;
				free(SB_AP_BITMAP(sbi)[i].bp_data,
				    M_REISERFSMNT);
			}
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
		}

		if (sbi->s_rs) {
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}

	if (sbi)
		free(sbi, M_REISERFSMNT);
	if (rmp)
		free(rmp, M_REISERFSMNT);
	dev_rel(dev);
	return (error);
}
Esempio n. 25
0
/*
 * Common code for mount and mountroot.
 */
static int
ext2_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct ext2mount *ump;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *es;
	struct cdev *dev = devvp->v_rdev;
	struct g_consumer *cp;
	struct bufobj *bo;
	struct csum *sump;
	int error;
	int ronly;
	int i, size;
	int32_t *lp;

	ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0);
	/* XXX: use VOP_ACCESS to check FS perms */
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	/* XXX: should we check for some sectorsize or 512 instead? */
	if (((SBSIZE % cp->provider->sectorsize) != 0) ||
	    (SBSIZE < cp->provider->sectorsize)) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
		return (EINVAL);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	bp = NULL;
	ump = NULL;
	if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0)
		goto out;
	es = (struct ext2fs *)bp->b_data;
	if (ext2_check_sb_compat(es, dev, ronly) != 0) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	if ((es->e2fs_state & E2FS_ISCLEAN) == 0 ||
	    (es->e2fs_state & E2FS_ERRORS)) {
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: Filesystem was not properly dismounted\n");
		} else {
			printf(
"WARNING: R/W mount denied.  Filesystem is not clean - run fsck\n");
			error = EPERM;
			goto out;
		}
	}
	ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO);

	/*
	 * I don't know whether this is the right strategy. Note that
	 * we dynamically allocate both an ext2_sb_info and an ext2_super_block
	 * while Linux keeps the super block in a locked buffer.
	 */
	ump->um_e2fs = malloc(sizeof(struct m_ext2fs),
		M_EXT2MNT, M_WAITOK);
	ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs),
		M_EXT2MNT, M_WAITOK);
	mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF);
	bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs));
	if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs)))
		goto out;

	/*
	 * Calculate the maximum contiguous blocks and size of cluster summary
	 * array.  In FFS this is done by newfs; however, the superblock 
	 * in ext2fs doesn't have these variables, so we can calculate 
	 * them here.
	 */
	ump->um_e2fs->e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize);
	if (ump->um_e2fs->e2fs_maxcontig > 0)
		ump->um_e2fs->e2fs_contigsumsize =
		    MIN(ump->um_e2fs->e2fs_maxcontig, EXT2_MAXCONTIG);
	else
		ump->um_e2fs->e2fs_contigsumsize = 0;
	if (ump->um_e2fs->e2fs_contigsumsize > 0) {
		size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t);
		ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK);
		size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum);
		ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK);
		lp = ump->um_e2fs->e2fs_maxcluster;
		sump = ump->um_e2fs->e2fs_clustersum;
		for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) {
			*lp++ = ump->um_e2fs->e2fs_contigsumsize;
			sump->cs_init = 0;
			sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) *
			    sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO);
		}
	}
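	/*
	 * Rough example: with 4 KiB blocks and the traditional 128 KiB
	 * MAXPHYS, e2fs_maxcontig == 32 and e2fs_contigsumsize is that value
	 * capped at EXT2_MAXCONTIG; each block group then gets a cs_sum
	 * array of (e2fs_contigsumsize + 1) counters.
	 */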

	brelse(bp);
	bp = NULL;
	fs = ump->um_e2fs;
	fs->e2fs_ronly = ronly;	/* ronly is set according to mnt_flags */

	/*
	 * If the fs is not mounted read-only, make sure the super block is
	 * always written back on a sync().
	 */
	fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0;
	if (ronly == 0) {
		fs->e2fs_fmod = 1;		/* mark it modified */
		fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;	/* set fs invalid */
	}
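	/*
	 * Clearing E2FS_ISCLEAN here means that if the system goes down
	 * without a clean unmount, the next mount sees the volume as dirty
	 * and a silent read/write mount is refused (see the check above).
	 */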
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_cp = cp;

	/*
	 * Setting those two parameters allowed us to use
	 * ufs_bmap w/o changse!
	 */
	ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs);
	ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1;
	ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs);
	if (ronly == 0)
		ext2_sbupdate(ump, MNT_WAIT);
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);
	return (0);
out:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (ump) {
		mtx_destroy(EXT2_MTX(ump));
		free(ump->um_e2fs->e2fs_gd, M_EXT2MNT);
		free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT);
		free(ump->um_e2fs->e2fs, M_EXT2MNT);
		free(ump->um_e2fs, M_EXT2MNT);
		free(ump, M_EXT2MNT);
		mp->mnt_data = NULL;
	}
	return (error);
}
Esempio n. 26
0
static int
ntfs_mount(struct mount *mp)
{
	int err = 0, error;
	struct vnode *devvp;
	struct nameidata ndp;
	struct thread *td;
	char *from;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ntfs_opts))
		return (EINVAL);

	/* Force mount as read-only. */
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);

	from = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)	
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) {
			/* Process export requests in vfs_mount.c */
			return (0);
		} else {
			printf("ntfs_mount(): MNT_UPDATE not supported\n");
			return (EINVAL);
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, from, td);
	err = namei(&ndp);
	if (err)
		return (err);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;

	if (!vn_isdisk(devvp, &err))  {
		vput(devvp);
		return (err);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	err = VOP_ACCESS(devvp, VREAD, td->td_ucred, td);
	if (err)
		err = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (err) {
		vput(devvp);
		return (err);
	}


	/*
	 * Since this is a new mount, we want the names for the device and
	 * the mount point copied in.  If an error occurs, the mountpoint is
	 * discarded by the upper level code.  Note that vfs_mount() handles
	 * copying the mountpoint f_mntonname for us, so we don't have to do
	 * it here unless we want to set it to something other than "path"
	 * for some reason.
	 */

	err = ntfs_mountfs(devvp, mp, td);
	if (err == 0) {

		/* Save "mounted from" info for mount point. */
		vfs_mountedfrom(mp, from);
	} else
		vrele(devvp);
	return (err);
}
Esempio n. 27
0
/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount *mp)
{
	int error = 0;
	struct vnode *lowerrootvp, *vp;
	struct vnode *nullm_rootvp;
	struct null_mount *xmp;
	struct thread *td = curthread;
	char *target;
	int isvnunlocked = 0, len;
	struct nameidata nd, *ndp = &nd;

	NULLFSDEBUG("nullfs_mount(mp = %p)\n", (void *)mp);

	if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_NULLFS))
		return (EPERM);
	if (mp->mnt_flag & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Only support update mounts for NFS export.
		 */
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
		else
			return (EOPNOTSUPP);
	}

	/*
	 * Get argument
	 */
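	/*
	 * vfs_getopt() returns a pointer into the option list along with the
	 * option's length; the trailing-NUL check below rejects a "target"
	 * value that is not a proper string.
	 */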
	error = vfs_getopt(mp->mnt_optnew, "target", (void **)&target, &len);
	if (error || target[len - 1] != '\0')
		return (EINVAL);

	/*
	 * Unlock lower node to avoid possible deadlock.
	 */
	if ((mp->mnt_vnodecovered->v_op == &null_vnodeops) &&
	    VOP_ISLOCKED(mp->mnt_vnodecovered) == LK_EXCLUSIVE) {
		VOP_UNLOCK(mp->mnt_vnodecovered, 0);
		isvnunlocked = 1;
	}
	/*
	 * Find lower node
	 */
	NDINIT(ndp, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, target, curthread);
	error = namei(ndp);

	/*
	 * Re-lock vnode.
	 * XXXKIB This is deadlock-prone as well.
	 */
	if (isvnunlocked)
		vn_lock(mp->mnt_vnodecovered, LK_EXCLUSIVE | LK_RETRY);

	if (error)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);

	/*
	 * Sanity check on lower vnode
	 */
	lowerrootvp = ndp->ni_vp;

	/*
	 * Check multi null mount to avoid `lock against myself' panic.
	 */
	if (lowerrootvp == VTONULL(mp->mnt_vnodecovered)->null_lowervp) {
		NULLFSDEBUG("nullfs_mount: multi null mount?\n");
		vput(lowerrootvp);
		return (EDEADLK);
	}

	xmp = (struct null_mount *) malloc(sizeof(struct null_mount),
	    M_NULLFSMNT, M_WAITOK | M_ZERO);

	/*
	 * Save reference to underlying FS
	 */
	xmp->nullm_vfs = lowerrootvp->v_mount;

	/*
	 * Save reference.  Each mount also holds
	 * a reference on the root vnode.
	 */
	error = null_nodeget(mp, lowerrootvp, &vp);
	/*
	 * Make sure the node alias worked
	 */
	if (error) {
		free(xmp, M_NULLFSMNT);
		return (error);
	}

	/*
	 * Keep a held reference to the root vnode.
	 * It is vrele'd in nullfs_unmount.
	 */
	nullm_rootvp = vp;
	nullm_rootvp->v_vflag |= VV_ROOT;
	xmp->nullm_rootvp = nullm_rootvp;

	/*
	 * Unlock the node (either the lower or the alias)
	 */
	VOP_UNLOCK(vp, 0);

	if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) {
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_LOCAL;
		MNT_IUNLOCK(mp);
	}

	xmp->nullm_flags |= NULLM_CACHE;
	if (vfs_getopt(mp->mnt_optnew, "nocache", NULL, NULL) == 0)
		xmp->nullm_flags &= ~NULLM_CACHE;

	MNT_ILOCK(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
		    (MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
		    MNTK_EXTENDED_SHARED);
	}
	mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
	MNT_IUNLOCK(mp);
	mp->mnt_data = xmp;
	vfs_getnewfsid(mp);
	if ((xmp->nullm_flags & NULLM_CACHE) != 0) {
		MNT_ILOCK(xmp->nullm_vfs);
		TAILQ_INSERT_TAIL(&xmp->nullm_vfs->mnt_uppers, mp,
		    mnt_upper_link);
		MNT_IUNLOCK(xmp->nullm_vfs);
	}

	vfs_mountedfrom(mp, target);

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n",
		mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname);
	return (0);
}
Esempio n. 28
0
/*
 * High level function to manipulate export options on a mount point
 * and the passed in netexport.
 * Struct export_args *argp is the variable used to twiddle options,
 * the structure is described in sys/mount.h
 */
int
vfs_export(struct mount *mp, struct export_args *argp)
{
	struct netexport *nep;
	int error;

	if (argp->ex_numsecflavors < 0
	    || argp->ex_numsecflavors >= MAXSECFLAVORS)
		return (EINVAL);

	error = 0;
	lockmgr(&mp->mnt_explock, LK_EXCLUSIVE, NULL);
	nep = mp->mnt_export;
	if (argp->ex_flags & MNT_DELEXPORT) {
		if (nep == NULL) {
			error = ENOENT;
			goto out;
		}
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
			MNT_IUNLOCK(mp);
		}
		vfs_free_addrlist(nep);
		mp->mnt_export = NULL;
		free(nep, M_MOUNT);
		nep = NULL;
		MNT_ILOCK(mp);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
		MNT_IUNLOCK(mp);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (nep == NULL) {
			nep = malloc(sizeof(struct netexport), M_MOUNT, M_WAITOK | M_ZERO);
			mp->mnt_export = nep;
		}
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				goto out;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_EXPUBLIC;
			MNT_IUNLOCK(mp);
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			goto out;
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_EXPORTED;
		MNT_IUNLOCK(mp);
	}

out:
	lockmgr(&mp->mnt_explock, LK_RELEASE, NULL);
	/*
	 * Once we have executed the vfs_export() command, we do
	 * not want to keep the "export" option around in the
	 * options list, since that will cause subsequent MNT_UPDATE
	 * calls to fail.  The export information is saved in
	 * mp->mnt_export, so we can safely delete the "export" mount option
	 * here.
	 */
	vfs_deleteopt(mp->mnt_optnew, "export");
	vfs_deleteopt(mp->mnt_opt, "export");
	return (error);
}
Esempio n. 29
0
static int
cd9660_mount(struct mount *mp, struct thread *td)
{
	struct vnode *devvp;
	char *fspec;
	int error;
	mode_t accessmode;
	struct nameidata ndp;
	struct iso_mnt *imp = 0;

	/*
	 * Unconditionally mount as read-only.
	 */
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_RDONLY;
	MNT_IUNLOCK(mp);

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	imp = VFSTOISOFS(mp);

	if (mp->mnt_flag & MNT_UPDATE) {
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0))
			return (0);
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(&ndp)))
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * Verify that user has necessary permissions on the device,
	 * or has superuser abilities
	 */
	accessmode = VREAD;
	error = VOP_ACCESS(devvp, accessmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = iso_mountfs(devvp, mp, td);
		if (error)
			vrele(devvp);
	} else {
		if (devvp != imp->im_devvp)
			error = EINVAL;	/* needs translation */
		vput(devvp);
	}
	if (error)
		return (error);
	vfs_mountedfrom(mp, fspec);
	return (0);
}
Esempio n. 30
0
/*
 * VFS Operations.
 *
 * mount system call
 */
static int
ext2_mount(struct mount *mp)
{
	struct vfsoptlist *opts;
	struct vnode *devvp;
	struct thread *td;
	struct ext2mount *ump = NULL;
	struct m_ext2fs *fs;
	struct nameidata nd, *ndp = &nd;
	accmode_t accmode;
	char *path, *fspec;
	int error, flags, len;

	td = curthread;
	opts = mp->mnt_optnew;

	if (vfs_filteropt(opts, ext2_opts))
		return (EINVAL);

	vfs_getopt(opts, "fspath", (void **)&path, NULL);
	/* Double-check the length of path.. */
	if (strlen(path) >= MAXMNTLEN - 1)
		return (ENAMETOOLONG);

	fspec = NULL;
	error = vfs_getopt(opts, "from", (void **)&fspec, &len);
	if (!error && fspec[len - 1] != '\0')
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOEXT2(mp);
		fs = ump->um_e2fs; 
		error = 0;
		if (fs->e2fs_ronly == 0 &&
		    vfs_flagopt(opts, "ro", NULL, 0)) {
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ext2_flushfiles(mp, flags, td);
			if ( error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) {
				fs->e2fs->e2fs_state |= E2FS_ISCLEAN;
				ext2_sbupdate(ump, MNT_WAIT);
			}
			fs->e2fs_ronly = 1;
			vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY);
			DROP_GIANT();
			g_topology_lock();
			g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ext2_reload(mp, td);
		if (error)
			return (error);
		devvp = ump->um_devvp;
		if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) {
			if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0))
				return (EPERM);

			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			DROP_GIANT();
			g_topology_lock();
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 ||
			    (fs->e2fs->e2fs_state & E2FS_ERRORS)) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf(
"WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt);
				} else {
					printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->e2fs_fsmnt);
					return (EPERM);
				}
			}
			fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;
			(void)ext2_cgupdate(ump, MNT_WAIT);
			fs->e2fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		if (vfs_flagopt(opts, "export", NULL, 0)) {
			/* Process export requests in vfs_mount.c. */
			return (error);
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (fspec == NULL)
		return (EINVAL);
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(ndp)) != 0)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * XXXRW: VOP_ACCESS() enough?
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ext2_mountfs(devvp, mp);
	} else {
		if (devvp != ump->um_devvp) {
			vput(devvp);
			return (EINVAL);	/* needs translation */
		} else
			vput(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOEXT2(mp);
	fs = ump->um_e2fs;

	/*
	 * Note that this strncpy() is ok because of a check at the start
	 * of ext2_mount().
	 */
	strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN);
	fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0';
	vfs_mountedfrom(mp, fspec);
	return (0);
}