Example #1
static void
vfs_mountroot_wait(void)
{
	struct root_hold_token *h;
	struct timeval lastfail;
	int curfail;

	curfail = 0;
	while (1) {
		DROP_GIANT();
		g_waitidle();
		PICKUP_GIANT();
		mtx_lock(&mountlist_mtx);
		if (LIST_EMPTY(&root_holds)) {
			mtx_unlock(&mountlist_mtx);
			break;
		}
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			printf("Root mount waiting for:");
			LIST_FOREACH(h, &root_holds, list)
				printf(" %s", h->who);
			printf("\n");
		}
		msleep(&root_holds, &mountlist_mtx, PZERO | PDROP, "roothold",
		    hz);
	}
}
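Every example on this page repeats the same bracket around GEOM work: DROP_GIANT() before taking the topology lock (or otherwise sleeping for a potentially long time), and PICKUP_GIANT() once it is released. A minimal sketch of that shape follows; the function name example_probe_dev, the class string "example", and the assumption that devvp arrives locked (as after namei() with LOCKLEAF) are illustrative only, not taken from any of the examples.

/*
 * Sketch of the recurring Giant/topology bracket: open the device
 * read-only through GEOM and close it again right away.  Roughly needs
 * <sys/param.h>, <sys/mutex.h>, <sys/vnode.h>, <geom/geom.h> and
 * <geom/geom_vfs.h>.
 */
static int
example_probe_dev(struct vnode *devvp)
{
	struct g_consumer *cp;
	int error;

	DROP_GIANT();				/* release Giant first */
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "example", 0);	/* 0 = read-only */
	if (error == 0)
		g_vfs_close(cp);
	g_topology_unlock();
	PICKUP_GIANT();				/* reacquire Giant on the way out */
	return (error);
}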
Example #2
/*
 * Unmount system call.
 */
static int
ext2_unmount(struct mount *mp, int mntflags)
{
	struct ext2mount *ump;
	struct m_ext2fs *fs;
	struct csum *sump;
	int error, flags, i, ronly;

	flags = 0;
	if (mntflags & MNT_FORCE) {
		if (mp->mnt_flag & MNT_ROOTFS)
			return (EINVAL);
		flags |= FORCECLOSE;
	}
	if ((error = ext2_flushfiles(mp, flags, curthread)) != 0)
		return (error);
	ump = VFSTOEXT2(mp);
	fs = ump->um_e2fs;
	ronly = fs->e2fs_ronly;
	if (ronly == 0 && ext2_cgupdate(ump, MNT_WAIT) == 0) {
		if (fs->e2fs_wasvalid)
			fs->e2fs->e2fs_state |= E2FS_ISCLEAN;
		ext2_sbupdate(ump, MNT_WAIT);
	}

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(ump->um_devvp);
	sump = fs->e2fs_clustersum;
	for (i = 0; i < fs->e2fs_gcount; i++, sump++)
		free(sump->cs_sum, M_EXT2MNT);
	free(fs->e2fs_clustersum, M_EXT2MNT);
	free(fs->e2fs_maxcluster, M_EXT2MNT);
	free(fs->e2fs_gd, M_EXT2MNT);
	free(fs->e2fs_contigdirs, M_EXT2MNT);
	free(fs->e2fs, M_EXT2MNT);
	free(fs, M_EXT2MNT);
	free(ump, M_EXT2MNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example #3
void
uio_yield(void)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
#ifndef __rtems__
	thread_lock(td);
	sched_prio(td, td->td_user_pri);
	mi_switch(SW_INVOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
#else /* __rtems__ */
	rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
#endif /* __rtems__ */
	PICKUP_GIANT();
}
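uio_yield() takes no arguments and simply lets other runnable threads get the CPU; a typical caller is a long loop that yields every so often. A hypothetical sketch (example_scan(), examine_block() and nblocks are placeholders, not part of the kernel API):

/* Hypothetical: scan a range of blocks, relinquishing the CPU periodically. */
static void
example_scan(int nblocks)
{
	int i;

	for (i = 0; i < nblocks; i++) {
		examine_block(i);	/* placeholder for the real per-block work */
		if ((i & 0xff) == 0xff)
			uio_yield();	/* yield every 256 iterations */
	}
}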
Example #4
static int
udf_unmount(struct mount *mp, int mntflags)
{
	struct udf_mnt *udfmp;
	int error, flags = 0;

	udfmp = VFSTOUDFFS(mp);

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	if ((error = vflush(mp, 0, flags, curthread)))
		return (error);

	if (udfmp->im_flags & UDFMNT_KICONV && udf_iconv) {
		if (udfmp->im_d2l)
			udf_iconv->close(udfmp->im_d2l);
#if 0
		if (udfmp->im_l2d)
			udf_iconv->close(udfmp->im_l2d);
#endif
	}

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(udfmp->im_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(udfmp->im_devvp);
	dev_rel(udfmp->im_dev);

	if (udfmp->s_table != NULL)
		free(udfmp->s_table, M_UDFMOUNT);

	free(udfmp, M_UDFMOUNT);

	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	return (0);
}
Example #5
static int
g_mbr_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_mbr_softc *ms;
	struct g_slicer *gsp;
	struct g_consumer *cp;
	int error, opened;

	gp = pp->geom;
	gsp = gp->softc;
	ms = gsp->softc;

	opened = 0;
	error = 0;
	switch(cmd) {
	case DIOCSMBR: {
		if (!(fflag & FWRITE))
			return (EPERM);
		DROP_GIANT();
		g_topology_lock();
		cp = LIST_FIRST(&gp->consumer);
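		/*
		 * g_access() deltas are (read, write, exclusive); if the
		 * consumer has no write access yet (acw == 0), take one
		 * write reference here and drop it again below once the
		 * new MBR has been written.
		 */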
		if (cp->acw == 0) {
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				opened = 1;
		}
		if (!error)
			error = g_mbr_modify(gp, ms, data, 512);
		if (!error)
			error = g_write_data(cp, 0, data, 512);
		if (opened)
			g_access(cp, 0, -1 , 0);
		g_topology_unlock();
		PICKUP_GIANT();
		return(error);
	}
	default:
		return (ENOIOCTL);
	}
}
Example #6
static int
udf_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct buf *bp = NULL;
	struct cdev *dev;
	struct anchor_vdp avdp;
	struct udf_mnt *udfmp = NULL;
	struct part_desc *pd;
	struct logvol_desc *lvd;
	struct fileset_desc *fsd;
	struct file_entry *root_fentry;
	uint32_t sector, size, mvds_start, mvds_end;
	uint32_t logical_secsize;
	uint32_t fsd_offset = 0;
	uint16_t part_num = 0, fsd_part = 0;
	int error = EINVAL;
	int logvol_found = 0, part_found = 0, fsd_found = 0;
	int bsize;
	struct g_consumer *cp;
	struct bufobj *bo;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "udf", 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto bail;

	bo = &devvp->v_bufobj;

	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	/* XXX: should be M_WAITOK */
	udfmp = malloc(sizeof(struct udf_mnt), M_UDFMOUNT,
	    M_NOWAIT | M_ZERO);
	if (udfmp == NULL) {
		printf("Cannot allocate UDF mount struct\n");
		error = ENOMEM;
		goto bail;
	}

	mp->mnt_data = udfmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(devvp->v_rdev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);
	udfmp->im_mountp = mp;
	udfmp->im_dev = dev;
	udfmp->im_devvp = devvp;
	udfmp->im_d2l = NULL;
	udfmp->im_cp = cp;
	udfmp->im_bo = bo;

#if 0
	udfmp->im_l2d = NULL;
#endif
	/*
	 * The UDF specification defines a logical sectorsize of 2048
	 * for DVD media.
	 */
	logical_secsize = 2048;

	if (((logical_secsize % cp->provider->sectorsize) != 0) ||
	    (logical_secsize < cp->provider->sectorsize)) {
		error = EINVAL;
		goto bail;
	}

	bsize = cp->provider->sectorsize;

	/* 
	 * Get the Anchor Volume Descriptor Pointer from sector 256.
	 * XXX Should also check sector n - 256, n, and 512.
	 */
	sector = 256;
	if ((error = bread(devvp, sector * btodb(logical_secsize), bsize,
			   NOCRED, &bp)) != 0)
		goto bail;
	if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR)))
		goto bail;

	bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp));
	brelse(bp);
	bp = NULL;

	/*
	 * Extract the Partition Descriptor and Logical Volume Descriptor
	 * from the Volume Descriptor Sequence.
	 * XXX Should we care about the partition type right now?
	 * XXX What about multiple partitions?
	 */
	mvds_start = le32toh(avdp.main_vds_ex.loc);
	mvds_end = mvds_start + (le32toh(avdp.main_vds_ex.len) - 1) / bsize;
	for (sector = mvds_start; sector < mvds_end; sector++) {
		if ((error = bread(devvp, sector * btodb(logical_secsize),
				   bsize, NOCRED, &bp)) != 0) {
			printf("Can't read sector %d of VDS\n", sector);
			goto bail;
		}
		lvd = (struct logvol_desc *)bp->b_data;
		if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) {
			udfmp->bsize = le32toh(lvd->lb_size);
			udfmp->bmask = udfmp->bsize - 1;
			udfmp->bshift = ffs(udfmp->bsize) - 1;
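			/*
			 * For a power-of-two block size ffs() returns
			 * log2 + 1, e.g. a 2048-byte logical block gives
			 * bshift = 11 and bmask = 0x7ff.
			 */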
			fsd_part = le16toh(lvd->_lvd_use.fsd_loc.loc.part_num);
			fsd_offset = le32toh(lvd->_lvd_use.fsd_loc.loc.lb_num);
			if (udf_find_partmaps(udfmp, lvd))
				break;
			logvol_found = 1;
		}
		pd = (struct part_desc *)bp->b_data;
		if (!udf_checktag(&pd->tag, TAGID_PARTITION)) {
			part_found = 1;
			part_num = le16toh(pd->part_num);
			udfmp->part_len = le32toh(pd->part_len);
			udfmp->part_start = le32toh(pd->start_loc);
		}

		brelse(bp); 
		bp = NULL;
		if ((part_found) && (logvol_found))
			break;
	}

	if (!part_found || !logvol_found) {
		error = EINVAL;
		goto bail;
	}

	if (fsd_part != part_num) {
		printf("FSD does not lie within the partition!\n");
		error = EINVAL;
		goto bail;
	}


	/*
	 * Grab the Fileset Descriptor
	 * Thanks to Chuck McCrobie <*****@*****.**> for pointing
	 * me in the right direction here.
	 */
	sector = udfmp->part_start + fsd_offset;
	if ((error = RDSECTOR(devvp, sector, udfmp->bsize, &bp)) != 0) {
		printf("Cannot read sector %d of FSD\n", sector);
		goto bail;
	}
	fsd = (struct fileset_desc *)bp->b_data;
	if (!udf_checktag(&fsd->tag, TAGID_FSD)) {
		fsd_found = 1;
		bcopy(&fsd->rootdir_icb, &udfmp->root_icb,
		    sizeof(struct long_ad));
	}

	brelse(bp);
	bp = NULL;

	if (!fsd_found) {
		printf("Couldn't find the fsd\n");
		error = EINVAL;
		goto bail;
	}

	/*
	 * Find the file entry for the root directory.
	 */
	sector = le32toh(udfmp->root_icb.loc.lb_num) + udfmp->part_start;
	size = le32toh(udfmp->root_icb.len);
	if ((error = udf_readdevblks(udfmp, sector, size, &bp)) != 0) {
		printf("Cannot read sector %d\n", sector);
		goto bail;
	}

	root_fentry = (struct file_entry *)bp->b_data;
	if ((error = udf_checktag(&root_fentry->tag, TAGID_FENTRY))) {
		printf("Invalid root file entry!\n");
		goto bail;
	}

	brelse(bp);
	bp = NULL;

	return 0;

bail:
	if (udfmp != NULL)
		free(udfmp, M_UDFMOUNT);
	if (bp != NULL)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	dev_rel(dev);
	return error;
}
Example #7
/*
 * Unmount the filesystem described by mp.
 */
static int
msdosfs_unmount(struct mount *mp, int mntflags)
{
	struct msdosfsmount *pmp;
	int error, flags;

	error = flags = 0;
	pmp = VFSTOMSDOSFS(mp);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0)
		error = msdosfs_sync(mp, MNT_WAIT);
	if ((mntflags & MNT_FORCE) != 0)
		flags |= FORCECLOSE;
	else if (error != 0)
		return (error);
	error = vflush(mp, 0, flags, curthread);
	if (error != 0 && error != ENXIO)
		return (error);
	if ((pmp->pm_flags & MSDOSFSMNT_RONLY) == 0) {
		error = markvoldirty(pmp, 0);
		if (error && error != ENXIO) {
			(void)markvoldirty(pmp, 1);
			return (error);
		}
	}
	if (pmp->pm_flags & MSDOSFSMNT_KICONV && msdosfs_iconv) {
		if (pmp->pm_w2u)
			msdosfs_iconv->close(pmp->pm_w2u);
		if (pmp->pm_u2w)
			msdosfs_iconv->close(pmp->pm_u2w);
		if (pmp->pm_d2u)
			msdosfs_iconv->close(pmp->pm_d2u);
		if (pmp->pm_u2d)
			msdosfs_iconv->close(pmp->pm_u2d);
	}

#ifdef MSDOSFS_DEBUG
	{
		struct vnode *vp = pmp->pm_devvp;
		struct bufobj *bo;

		bo = &vp->v_bufobj;
		BO_LOCK(bo);
		VI_LOCK(vp);
		vn_printf(vp,
		    "msdosfs_umount(): just before calling VOP_CLOSE()\n");
		printf("freef %p, freeb %p, mount %p\n",
		    TAILQ_NEXT(vp, v_actfreelist), vp->v_actfreelist.tqe_prev,
		    vp->v_mount);
		printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
		    TAILQ_FIRST(&vp->v_bufobj.bo_clean.bv_hd),
		    TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd),
		    vp->v_bufobj.bo_numoutput, vp->v_type);
		VI_UNLOCK(vp);
		BO_UNLOCK(bo);
	}
#endif
	DROP_GIANT();
	if (pmp->pm_devvp->v_type == VCHR && pmp->pm_devvp->v_rdev != NULL)
		pmp->pm_devvp->v_rdev->si_mountpt = NULL;
	g_topology_lock();
	g_vfs_close(pmp->pm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(pmp->pm_devvp);
	dev_rel(pmp->pm_dev);
	free(pmp->pm_inusemap, M_MSDOSFSFAT);
	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_free(mp);
	lockdestroy(&pmp->pm_fatlock);
	free(pmp, M_MSDOSFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	return (error);
}
Example #8
static int
mountmsdosfs(struct vnode *devvp, struct mount *mp)
{
	struct msdosfsmount *pmp;
	struct buf *bp;
	struct cdev *dev;
	union bootsector *bsp;
	struct byte_bpb33 *b33;
	struct byte_bpb50 *b50;
	struct byte_bpb710 *b710;
	u_int8_t SecPerClust;
	u_long clusters;
	int ronly, error;
	struct g_consumer *cp;
	struct bufobj *bo;

	bp = NULL;		/* This and pmp both used in error_exit. */
	pmp = NULL;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "msdosfs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		goto error_exit;

	bo = &devvp->v_bufobj;

	/*
	 * Read the boot sector of the filesystem, and then check the
	 * boot signature.  If not a dos boot sector then error out.
	 *
	 * NOTE: 8192 is a magic size that works for ffs.
	 */
	error = bread(devvp, 0, 8192, NOCRED, &bp);
	if (error)
		goto error_exit;
	bp->b_flags |= B_AGE;
	bsp = (union bootsector *)bp->b_data;
	b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
	b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
	b710 = (struct byte_bpb710 *)bsp->bs710.bsBPB;

#ifndef MSDOSFS_NOCHECKSIG
	if (bsp->bs50.bsBootSectSig0 != BOOTSIG0
	    || bsp->bs50.bsBootSectSig1 != BOOTSIG1) {
		error = EINVAL;
		goto error_exit;
	}
#endif

	pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
	pmp->pm_mountp = mp;
	pmp->pm_cp = cp;
	pmp->pm_bo = bo;

	lockinit(&pmp->pm_fatlock, 0, msdosfs_lock_msg, 0, 0);

	/*
	 * Initialize ownerships and permissions, since nothing else will
	 * initialize them iff we are mounting root.
	 */
	pmp->pm_uid = UID_ROOT;
	pmp->pm_gid = GID_WHEEL;
	pmp->pm_mask = pmp->pm_dirmask = S_IXUSR | S_IXGRP | S_IXOTH |
	    S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR;

	/*
	 * Experimental support for large MS-DOS filesystems.
	 * WARNING: This uses at least 32 bytes of kernel memory (which is not
	 * reclaimed until the FS is unmounted) for each file on disk to map
	 * between the 32-bit inode numbers used by VFS and the 64-bit
	 * pseudo-inode numbers used internally by msdosfs. This is only
	 * safe to use in certain controlled situations (e.g. read-only FS
	 * with less than 1 million files).
	 * Since the mappings do not persist across unmounts (or reboots), these
	 * filesystems are not suitable for exporting through NFS, or any other
	 * application that requires fixed inode numbers.
	 */
	vfs_flagopt(mp->mnt_optnew, "large", &pmp->pm_flags, MSDOSFS_LARGEFS);

	/*
	 * Compute several useful quantities from the bpb in the
	 * bootsector.  Copy in the dos 5 variant of the bpb then fix up
	 * the fields that are different between dos 5 and dos 3.3.
	 */
	SecPerClust = b50->bpbSecPerClust;
	pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
	if (pmp->pm_BytesPerSec < DEV_BSIZE) {
		error = EINVAL;
		goto error_exit;
	}
	pmp->pm_ResSectors = getushort(b50->bpbResSectors);
	pmp->pm_FATs = b50->bpbFATs;
	pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
	pmp->pm_Sectors = getushort(b50->bpbSectors);
	pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
	pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
	pmp->pm_Heads = getushort(b50->bpbHeads);
	pmp->pm_Media = b50->bpbMedia;

	/* calculate the ratio of sector size to DEV_BSIZE */
	pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;

	/*
	 * We don't check pm_Heads nor pm_SecPerTrack, because
	 * these may not be set for EFI file systems. We don't
	 * use these anyway, so we're unaffected if they are
	 * invalid.
	 */
	if (!pmp->pm_BytesPerSec || !SecPerClust) {
		error = EINVAL;
		goto error_exit;
	}

	if (pmp->pm_Sectors == 0) {
		pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
		pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
	} else {
		pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
		pmp->pm_HugeSectors = pmp->pm_Sectors;
	}
	if (!(pmp->pm_flags & MSDOSFS_LARGEFS)) {
		if (pmp->pm_HugeSectors > 0xffffffff /
		    (pmp->pm_BytesPerSec / sizeof(struct direntry)) + 1) {
			/*
			 * We cannot deal currently with this size of disk
			 * due to fileid limitations (see msdosfs_getattr and
			 * msdosfs_readdir)
			 */
			error = EINVAL;
			vfs_mount_error(mp,
			    "Disk too big, try '-o large' mount option");
			goto error_exit;
		}
	}

	if (pmp->pm_RootDirEnts == 0) {
		if (pmp->pm_FATsecs
		    || getushort(b710->bpbFSVers)) {
			error = EINVAL;
#ifdef MSDOSFS_DEBUG
			printf("mountmsdosfs(): bad FAT32 filesystem\n");
#endif
			goto error_exit;
		}
		pmp->pm_fatmask = FAT32_MASK;
		pmp->pm_fatmult = 4;
		pmp->pm_fatdiv = 1;
		pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
		if (getushort(b710->bpbExtFlags) & FATMIRROR)
			pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
		else
			pmp->pm_flags |= MSDOSFS_FATMIRROR;
	} else
		pmp->pm_flags |= MSDOSFS_FATMIRROR;

	/*
	 * Check a few values (could do some more):
	 * - logical sector size: power of 2, >= block size
	 * - sectors per cluster: power of 2, >= 1
	 * - number of sectors:   >= 1, <= size of partition
	 * - number of FAT sectors: >= 1
	 */
	if ( (SecPerClust == 0)
	  || (SecPerClust & (SecPerClust - 1))
	  || (pmp->pm_BytesPerSec < DEV_BSIZE)
	  || (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1))
	  || (pmp->pm_HugeSectors == 0)
	  || (pmp->pm_FATsecs == 0)
	  || (SecPerClust * pmp->pm_BlkPerSec > MAXBSIZE / DEV_BSIZE)
	) {
		error = EINVAL;
		goto error_exit;
	}

	pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
	pmp->pm_HiddenSects *= pmp->pm_BlkPerSec;	/* XXX not used? */
	pmp->pm_FATsecs     *= pmp->pm_BlkPerSec;
	SecPerClust         *= pmp->pm_BlkPerSec;

	pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;

	if (FAT32(pmp)) {
		pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
		pmp->pm_firstcluster = pmp->pm_fatblk
			+ (pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_fsinfo = getushort(b710->bpbFSInfo) * pmp->pm_BlkPerSec;
	} else {
		pmp->pm_rootdirblk = pmp->pm_fatblk +
			(pmp->pm_FATs * pmp->pm_FATsecs);
		pmp->pm_rootdirsize = howmany(pmp->pm_RootDirEnts *
			sizeof(struct direntry), DEV_BSIZE); /* in blocks */
		pmp->pm_firstcluster = pmp->pm_rootdirblk + pmp->pm_rootdirsize;
	}

	pmp->pm_maxcluster = (pmp->pm_HugeSectors - pmp->pm_firstcluster) /
	    SecPerClust + 1;
	pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE;	/* XXX not used? */

	if (pmp->pm_fatmask == 0) {
		if (pmp->pm_maxcluster
		    <= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
			/*
			 * This will usually be a floppy disk. This size makes
			 * sure that one fat entry will not be split across
			 * multiple blocks.
			 */
			pmp->pm_fatmask = FAT12_MASK;
			pmp->pm_fatmult = 3;
			pmp->pm_fatdiv = 2;
		} else {
			pmp->pm_fatmask = FAT16_MASK;
			pmp->pm_fatmult = 2;
			pmp->pm_fatdiv = 1;
		}
	}
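	/*
	 * pm_fatmult / pm_fatdiv express the width of one FAT entry as a
	 * byte ratio: 3/2 (1.5 bytes) for FAT12, 2 for FAT16 and 4 for
	 * FAT32, which is why the FAT capacity below is computed as
	 * (pm_fatsize / pm_fatmult) * pm_fatdiv entries.
	 */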

	clusters = (pmp->pm_fatsize / pmp->pm_fatmult) * pmp->pm_fatdiv;
	if (pmp->pm_maxcluster >= clusters) {
#ifdef MSDOSFS_DEBUG
		printf("Warning: number of clusters (%ld) exceeds FAT "
		    "capacity (%ld)\n", pmp->pm_maxcluster + 1, clusters);
#endif
		pmp->pm_maxcluster = clusters - 1;
	}

	if (FAT12(pmp))
		pmp->pm_fatblocksize = 3 * 512;
	else
		pmp->pm_fatblocksize = PAGE_SIZE;
	pmp->pm_fatblocksize = roundup(pmp->pm_fatblocksize,
	    pmp->pm_BytesPerSec);
	pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
	pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;

	/*
	 * Compute mask and shift value for isolating cluster relative byte
	 * offsets and cluster numbers from a file offset.
	 */
	pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
	pmp->pm_crbomask = pmp->pm_bpcluster - 1;
	pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;
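	/*
	 * Worked example (hypothetical values): with 8 sectors per cluster,
	 * pm_bpcluster = 8 * 512 = 4096, pm_crbomask = 0xfff and
	 * pm_cnshift = 12, so file offset 10000 falls in relative cluster
	 * 10000 >> 12 = 2, at byte 10000 & 0xfff = 1808 within it.
	 */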

	/*
	 * Check for valid cluster size
	 * must be a power of 2
	 */
	if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
		error = EINVAL;
		goto error_exit;
	}

	/*
	 * Release the bootsector buffer.
	 */
	brelse(bp);
	bp = NULL;

	/*
	 * Check the fsinfo sector if we have one.  Silently fix up our
	 * in-core copy of fp->fsinxtfree if it is unknown (0xffffffff)
	 * or too large.  Ignore fp->fsinfree for now, since we need to
	 * read the entire FAT anyway to fill the inuse map.
	 */
	if (pmp->pm_fsinfo) {
		struct fsinfo *fp;

		if ((error = bread(devvp, pmp->pm_fsinfo, pmp->pm_BytesPerSec,
		    NOCRED, &bp)) != 0)
			goto error_exit;
		fp = (struct fsinfo *)bp->b_data;
		if (!bcmp(fp->fsisig1, "RRaA", 4)
		    && !bcmp(fp->fsisig2, "rrAa", 4)
		    && !bcmp(fp->fsisig3, "\0\0\125\252", 4)) {
			pmp->pm_nxtfree = getulong(fp->fsinxtfree);
			if (pmp->pm_nxtfree > pmp->pm_maxcluster)
				pmp->pm_nxtfree = CLUST_FIRST;
		} else
			pmp->pm_fsinfo = 0;
		brelse(bp);
		bp = NULL;
	}

	/*
	 * Finish initializing pmp->pm_nxtfree (just in case the first few
	 * sectors aren't properly reserved in the FAT).  This completes
	 * the fixup for fp->fsinxtfree, and fixes up the zero-initialized
	 * value if there is no fsinfo.  We will use pmp->pm_nxtfree
	 * internally even if there is no fsinfo.
	 */
	if (pmp->pm_nxtfree < CLUST_FIRST)
		pmp->pm_nxtfree = CLUST_FIRST;

	/*
	 * Allocate memory for the bitmap of allocated clusters, and then
	 * fill it in.
	 */
	pmp->pm_inusemap = malloc(howmany(pmp->pm_maxcluster + 1, N_INUSEBITS)
				  * sizeof(*pmp->pm_inusemap),
				  M_MSDOSFSFAT, M_WAITOK);

	/*
	 * fillinusemap() needs pm_devvp.
	 */
	pmp->pm_devvp = devvp;
	pmp->pm_dev = dev;

	/*
	 * Have the inuse map filled in.
	 */
	MSDOSFS_LOCK_MP(pmp);
	error = fillinusemap(pmp);
	MSDOSFS_UNLOCK_MP(pmp);
	if (error != 0)
		goto error_exit;

	/*
	 * If they want fat updates to be synchronous then let them suffer
	 * the performance degradation in exchange for the on disk copy of
	 * the fat being correct just about all the time.  I suppose this
	 * would be a good thing to turn on if the kernel is still flakey.
	 */
	if (mp->mnt_flag & MNT_SYNCHRONOUS)
		pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;

	/*
	 * Finish up.
	 */
	if (ronly)
		pmp->pm_flags |= MSDOSFSMNT_RONLY;
	else {
		if ((error = markvoldirty(pmp, 1)) != 0) {
			(void)markvoldirty(pmp, 0);
			goto error_exit;
		}
		pmp->pm_fmod = 1;
	}
	mp->mnt_data =  pmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);

	if (pmp->pm_flags & MSDOSFS_LARGEFS)
		msdosfs_fileno_init(mp);

	return 0;

error_exit:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (pmp) {
		lockdestroy(&pmp->pm_fatlock);
		if (pmp->pm_inusemap)
			free(pmp->pm_inusemap, M_MSDOSFSFAT);
		free(pmp, M_MSDOSFSMNT);
		mp->mnt_data = NULL;
	}
	dev_rel(dev);
	return (error);
}
Example #9
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 */
static int
msdosfs_mount(struct mount *mp)
{
	struct vnode *devvp;	  /* vnode for blk device to mount */
	struct thread *td;
	/* msdosfs specific mount control block */
	struct msdosfsmount *pmp = NULL;
	struct nameidata ndp;
	int error, flags;
	accmode_t accmode;
	char *from;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, msdosfs_opts))
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		pmp = VFSTOMSDOSFS(mp);
		if (vfs_flagopt(mp->mnt_optnew, "export", NULL, 0)) {
			/*
			 * Forbid export requests if filesystem has
			 * MSDOSFS_LARGEFS flag set.
			 */
			if ((pmp->pm_flags & MSDOSFS_LARGEFS) != 0) {
				vfs_mount_error(mp,
				    "MSDOSFS_LARGEFS flag set, cannot export");
				return (EOPNOTSUPP);
			}
		}
		if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = vflush(mp, 0, flags, td);
			if (error)
				return (error);

			/*
			 * Now the volume is clean.  Mark it so while the
			 * device is still rw.
			 */
			error = markvoldirty(pmp, 0);
			if (error) {
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/* Downgrade the device from rw to ro. */
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error) {
				(void)markvoldirty(pmp, 1);
				return (error);
			}

			/*
			 * Backing out after an error was painful in the
			 * above.  Now we are committed to succeeding.
			 */
			pmp->pm_fmod = 0;
			pmp->pm_flags |= MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
		} else if ((pmp->pm_flags & MSDOSFSMNT_RONLY) &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = pmp->pm_devvp;
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			DROP_GIANT();
			g_topology_lock();
			error = g_access(pmp->pm_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			pmp->pm_fmod = 1;
			pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);

			/* Now that the volume is modifiable, mark it dirty. */
			error = markvoldirty(pmp, 1);
			if (error)
				return (error); 
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (vfs_getopt(mp->mnt_optnew, "from", (void **)&from, NULL))
		return (EINVAL);
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, from, td);
	error = namei(&ndp);
	if (error)
		return (error);
	devvp = ndp.ni_vp;
	NDFREE(&ndp, NDF_ONLY_PNBUF);

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = mountmsdosfs(devvp, mp);
#ifdef MSDOSFS_DEBUG		/* only needed for the printf below */
		pmp = VFSTOMSDOSFS(mp);
#endif
	} else {
		vput(devvp);
		if (devvp != pmp->pm_devvp)
			return (EINVAL);	/* XXX needs translation */
	}
	if (error) {
		vrele(devvp);
		return (error);
	}

	error = update_mp(mp, td);
	if (error) {
		if ((mp->mnt_flag & MNT_UPDATE) == 0)
			msdosfs_unmount(mp, MNT_FORCE);
		return error;
	}

	if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
		devvp->v_rdev->si_mountpt = mp;
	vfs_mountedfrom(mp, from);
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
#endif
	return (0);
}
Example #10
/*
 * Common code for mount and mountroot.
 */
static int
ext2_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct ext2mount *ump;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *es;
	struct cdev *dev = devvp->v_rdev;
	struct g_consumer *cp;
	struct bufobj *bo;
	struct csum *sump;
	int error;
	int ronly;
	int i, size;
	int32_t *lp;

	ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0);
	/* XXX: use VOP_ACCESS to check FS perms */
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	/* XXX: should we check for some sectorsize or 512 instead? */
	if (((SBSIZE % cp->provider->sectorsize) != 0) ||
	    (SBSIZE < cp->provider->sectorsize)) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
		return (EINVAL);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	bp = NULL;
	ump = NULL;
	if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0)
		goto out;
	es = (struct ext2fs *)bp->b_data;
	if (ext2_check_sb_compat(es, dev, ronly) != 0) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	if ((es->e2fs_state & E2FS_ISCLEAN) == 0 ||
	    (es->e2fs_state & E2FS_ERRORS)) {
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: Filesystem was not properly dismounted\n");
		} else {
			printf(
"WARNING: R/W mount denied.  Filesystem is not clean - run fsck\n");
			error = EPERM;
			goto out;
		}
	}
	ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO);

	/*
	 * I don't know whether this is the right strategy. Note that
	 * we dynamically allocate both an ext2_sb_info and an ext2_super_block
	 * while Linux keeps the super block in a locked buffer.
	 */
	ump->um_e2fs = malloc(sizeof(struct m_ext2fs),
		M_EXT2MNT, M_WAITOK);
	ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs),
		M_EXT2MNT, M_WAITOK);
	mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF);
	bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs));
	if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs)))
		goto out;

	/*
	 * Calculate the maximum contiguous blocks and size of cluster summary
	 * array.  In FFS this is done by newfs; however, the superblock 
	 * in ext2fs doesn't have these variables, so we can calculate 
	 * them here.
	 */
	ump->um_e2fs->e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize);
	if (ump->um_e2fs->e2fs_maxcontig > 0)
		ump->um_e2fs->e2fs_contigsumsize =
		    MIN(ump->um_e2fs->e2fs_maxcontig, EXT2_MAXCONTIG);
	else
		ump->um_e2fs->e2fs_contigsumsize = 0;
	if (ump->um_e2fs->e2fs_contigsumsize > 0) {
		size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t);
		ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK);
		size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum);
		ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK);
		lp = ump->um_e2fs->e2fs_maxcluster;
		sump = ump->um_e2fs->e2fs_clustersum;
		for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) {
			*lp++ = ump->um_e2fs->e2fs_contigsumsize;
			sump->cs_init = 0;
			sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) *
			    sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO);
		}
	}

	brelse(bp);
	bp = NULL;
	fs = ump->um_e2fs;
	fs->e2fs_ronly = ronly;	/* ronly is set according to mnt_flags */

	/*
	 * If the fs is not mounted read-only, make sure the super block is
	 * always written back on a sync().
	 */
	fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0;
	if (ronly == 0) {
		fs->e2fs_fmod = 1;		/* mark it modified */
		fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;	/* set fs invalid */
	}
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_cp = cp;

	/*
	 * Setting those two parameters allowed us to use
	 * ufs_bmap w/o changes!
	 */
	ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs);
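	/*
	 * e2fs_log_bsize is log2(block size) - 10 (the block size is
	 * 1024 << e2fs_log_bsize) and DEV_BSIZE is 512 = 2^9, so the shift
	 * from filesystem blocks to DEV_BSIZE units is
	 * e2fs_log_bsize + 10 - 9 = e2fs_log_bsize + 1.
	 */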
	ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1;
	ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs);
	if (ronly == 0)
		ext2_sbupdate(ump, MNT_WAIT);
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
	MNT_IUNLOCK(mp);
	return (0);
out:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}
	if (ump) {
		mtx_destroy(EXT2_MTX(ump));
		free(ump->um_e2fs->e2fs_gd, M_EXT2MNT);
		free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT);
		free(ump->um_e2fs->e2fs, M_EXT2MNT);
		free(ump->um_e2fs, M_EXT2MNT);
		free(ump, M_EXT2MNT);
		mp->mnt_data = NULL;
	}
	return (error);
}
Example #11
/*-
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_bsd_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td)
{
	struct g_geom *gp;
	struct g_bsd_softc *ms;
	struct g_slicer *gsp;
	u_char *label;
	int error;

	gp = pp->geom;
	gsp = gp->softc;
	ms = gsp->softc;

	switch(cmd) {
	case DIOCGDINFO:
		/* Return a copy of the disklabel to userland. */
		bsd_disklabel_le_dec(ms->label, data, MAXPARTITIONS);
		return(0);
	case DIOCBSDBB: {
		struct g_consumer *cp;
		u_char *buf;
		void *p;
		int error, i;
		uint64_t sum;

		if (!(fflag & FWRITE))
			return (EPERM);
		/* The disklabel to set is the ioctl argument. */
		buf = g_malloc(BBSIZE, M_WAITOK);
		p = *(void **)data;
		error = copyin(p, buf, BBSIZE);
		if (!error) {
			/* XXX: Rude, but supposedly safe */
			DROP_GIANT();
			g_topology_lock();
			/* Validate and modify our slice instance to match. */
			error = g_bsd_modify(gp, buf + ms->labeloffset);
			if (!error) {
				cp = LIST_FIRST(&gp->consumer);
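				/*
				 * On alpha the SRM firmware checksums the
				 * boot block: the 64-bit sum of the first
				 * 63 quadwords must be stored in the 64th
				 * (byte offset 504), so recompute it after
				 * the label edit.
				 */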
				if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
					sum = 0;
					for (i = 0; i < 63; i++)
						sum += le64dec(buf + i * 8);
					le64enc(buf + 504, sum);
				}
				error = g_write_data(cp, 0, buf, BBSIZE);
			}
			g_topology_unlock();
			PICKUP_GIANT();
		}
		g_free(buf);
		return (error);
	}
	case DIOCSDINFO:
	case DIOCWDINFO: {
		if (!(fflag & FWRITE))
			return (EPERM);
		label = g_malloc(LABELSIZE, M_WAITOK);
		/* The disklabel to set is the ioctl argument. */
		bsd_disklabel_le_enc(label, data);

		DROP_GIANT();
		g_topology_lock();
		/* Validate and modify our slice instance to match. */
		error = g_bsd_modify(gp, label);
		if (error == 0 && cmd == DIOCWDINFO)
			error = g_bsd_writelabel(gp, NULL);
		g_topology_unlock();
		PICKUP_GIANT();
		g_free(label);
		return(error);
	}
	default:
		return (ENOIOCTL);
	}
}
Example #12
static int
parse_dir_md(char **conf)
{
	struct stat sb;
	struct thread *td;
	struct md_ioctl *mdio;
	char *path, *tok;
	int error, fd, len;

	td = curthread;

	error = parse_token(conf, &tok);
	if (error)
		return (error);

	len = strlen(tok);
	mdio = malloc(sizeof(*mdio) + len + 1, M_TEMP, M_WAITOK | M_ZERO);
	path = (void *)(mdio + 1);
	bcopy(tok, path, len);
	free(tok, M_TEMP);

	/* Get file status. */
	error = kern_stat(td, path, UIO_SYSSPACE, &sb);
	if (error)
		goto out;

	/* Open /dev/mdctl so that we can attach/detach. */
	error = kern_open(td, "/dev/" MDCTL_NAME, UIO_SYSSPACE, O_RDWR, 0);
	if (error)
		goto out;

	fd = td->td_retval[0];
	mdio->md_version = MDIOVERSION;
	mdio->md_type = MD_VNODE;

	if (root_mount_mddev != -1) {
		mdio->md_unit = root_mount_mddev;
		DROP_GIANT();
		error = kern_ioctl(td, fd, MDIOCDETACH, (void *)mdio);
		PICKUP_GIANT();
		/* Ignore errors. We don't care. */
		root_mount_mddev = -1;
	}

	mdio->md_file = (void *)(mdio + 1);
	mdio->md_options = MD_AUTOUNIT | MD_READONLY;
	mdio->md_mediasize = sb.st_size;
	mdio->md_unit = 0;
	DROP_GIANT();
	error = kern_ioctl(td, fd, MDIOCATTACH, (void *)mdio);
	PICKUP_GIANT();
	if (error)
		goto out;

	if (mdio->md_unit > 9) {
		printf("rootmount: too many md units\n");
		mdio->md_file = NULL;
		mdio->md_options = 0;
		mdio->md_mediasize = 0;
		DROP_GIANT();
		error = kern_ioctl(td, fd, MDIOCDETACH, (void *)mdio);
		PICKUP_GIANT();
		/* Ignore errors. We don't care. */
		error = ERANGE;
		goto out;
	}

	root_mount_mddev = mdio->md_unit;
	printf(MD_NAME "%u attached to %s\n", root_mount_mddev, mdio->md_file);

	error = kern_close(td, fd);

 out:
	free(mdio, M_TEMP);
	return (error);
}
Example #13
/*
 * Shutdown the system cleanly to prepare for reboot, halt, or power off.
 */
void
kern_reboot(int howto)
{
	static int first_buf_printf = 1;

#if defined(SMP)
	/*
	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
	 */
	if (!SCHEDULER_STOPPED()) {
		thread_lock(curthread);
		sched_bind(curthread, 0);
		thread_unlock(curthread);
		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
	}
#endif
	/* We're in the process of rebooting. */
	rebooting = 1;

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

	/* We are out of the debugger now. */
	kdb_active = 0;

	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/* 
	 * Now sync filesystems
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy, pbusy;
#ifndef PREEMPTION
		int subiter;
#endif

		waittime = 0;

		wdog_kern_pat(WD_LASTVAL);
		sys_sync(curthread, NULL);

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if (isbufbusy(bp))
					nbusy++;
			if (nbusy == 0) {
				if (first_buf_printf)
					printf("All buffers synced.");
				break;
			}
			if (first_buf_printf) {
				printf("Syncing disks, buffers remaining... ");
				first_buf_printf = 0;
			}
			printf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;

			wdog_kern_pat(WD_LASTVAL);
			sys_sync(curthread, NULL);

#ifdef PREEMPTION
			/*
			 * Drop Giant and spin for a while to allow
			 * interrupt threads to run.
			 */
			DROP_GIANT();
			DELAY(50000 * iter);
			PICKUP_GIANT();
#else
			/*
			 * Drop Giant and context switch several times to
			 * allow interrupt threads to run.
			 */
			DROP_GIANT();
			for (subiter = 0; subiter < 50 * iter; subiter++) {
				thread_lock(curthread);
				mi_switch(SW_VOL, NULL);
				thread_unlock(curthread);
				DELAY(1000);
			}
			PICKUP_GIANT();
#endif
		}
		printf("\n");
		/*
		 * Count only busy local buffers to prevent forcing 
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = 0;
		for (bp = &buf[nbuf]; --bp >= buf; ) {
			if (isbufbusy(bp)) {
#if 0
/* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
				if (bp->b_dev == NULL) {
					TAILQ_REMOVE(&mountlist,
					    bp->b_vp->v_mount, mnt_list);
					continue;
				}
#endif
				nbusy++;
				if (show_busybufs > 0) {
					printf(
	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
					    nbusy, bp, bp->b_vp, bp->b_flags,
					    (intmax_t)bp->b_blkno,
					    (intmax_t)bp->b_lblkno);
					BUF_LOCKPRINTINFO(bp);
					if (show_busybufs > 1)
						vn_printf(bp->b_vp,
						    "vnode content: ");
				}
			}
		}
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			printf("Giving up on %d buffers\n", nbusy);
			DELAY(5000000);	/* 5 seconds */
		} else {
			if (!first_buf_printf)
				printf("Final sync complete\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == 0)
				vfs_unmountall();
		}
		swapoff_all();
		DELAY(100000);		/* wait for console output to finish */
	}

	print_uptime();

	cngrab();

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping) 
		doadump(TRUE);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
Example #14
/*
 * Common code for mount and mountroot
 */ 
static int
reiserfs_mountfs(struct vnode *devvp, struct mount *mp, struct thread *td)
{
	int error, old_format = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;
	struct reiserfs_super_block *rs;
	struct cdev *dev;

	struct g_consumer *cp;
	struct bufobj *bo;

	//ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

	dev = devvp->v_rdev;
	dev_ref(dev);
	DROP_GIANT();
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "reiserfs", /* read-only */ 0);
	g_topology_unlock();
	PICKUP_GIANT();
	VOP_UNLOCK(devvp, 0);
	if (error) {
		dev_rel(dev);
		return (error);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;

	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	rmp = NULL;
	sbi = NULL;

	/* rmp contains any information about this specific mount */
	rmp = malloc(sizeof *rmp, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!rmp) {
		error = (ENOMEM);
		goto out;
	}
	sbi = malloc(sizeof *sbi, M_REISERFSMNT, M_WAITOK | M_ZERO);
	if (!sbi) {
		error = (ENOMEM);
		goto out;
	}
	rmp->rm_reiserfs = sbi;
	rmp->rm_mountp   = mp;
	rmp->rm_devvp    = devvp;
	rmp->rm_dev      = dev;
	rmp->rm_bo       = &devvp->v_bufobj;
	rmp->rm_cp       = cp;

	/* Set default values for options: non-aggressive tails */
	REISERFS_SB(sbi)->s_mount_opt = (1 << REISERFS_SMALLTAIL);
	REISERFS_SB(sbi)->s_rd_only   = 1;
	REISERFS_SB(sbi)->s_devvp     = devvp;

	/* Read the super block */
	if ((error = read_super_block(rmp, REISERFS_OLD_DISK_OFFSET)) == 0) {
		/* The read process succeeded, it's an old format */
		old_format = 1;
	} else if ((error = read_super_block(rmp, REISERFS_DISK_OFFSET)) != 0) {
		reiserfs_log(LOG_ERR, "can not find a ReiserFS filesystem\n");
		goto out;
	}

	rs = SB_DISK_SUPER_BLOCK(sbi);

	/*
	 * Let's do basic sanity check to verify that underlying device is
	 * not smaller than the filesystem. If the check fails then abort and
	 * scream, because bad stuff will happen otherwise.
	 */
#if 0
	if (s->s_bdev && s->s_bdev->bd_inode &&
	    i_size_read(s->s_bdev->bd_inode) <
	    sb_block_count(rs) * sb_blocksize(rs)) {
		reiserfs_log(LOG_ERR,
		    "reiserfs: filesystem cannot be mounted because it is "
		    "bigger than the device.\n");
		reiserfs_log(LOG_ERR, "reiserfs: you may need to run fsck "
		    "rr may be you forgot to reboot after fdisk when it "
		    "told you to.\n");
		goto out;
	}
#endif

	/*
	 * XXX This is from the original Linux code, but why assign two
	 * values to the same variable, one right after the other?
	 */
	sbi->s_mount_state = SB_REISERFS_STATE(sbi);
	sbi->s_mount_state = REISERFS_VALID_FS;

	if ((error = (old_format ?
	    read_old_bitmaps(rmp) : read_bitmaps(rmp)))) {
		reiserfs_log(LOG_ERR, "unable to read bitmap\n");
		goto out;
	}

	/* Make data=ordered the default */
	if (!reiserfs_data_log(sbi) && !reiserfs_data_ordered(sbi) &&
	    !reiserfs_data_writeback(sbi)) {
		REISERFS_SB(sbi)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
	}

	if (reiserfs_data_log(sbi)) {
		reiserfs_log(LOG_INFO, "using journaled data mode\n");
	} else if (reiserfs_data_ordered(sbi)) {
		reiserfs_log(LOG_INFO, "using ordered data mode\n");
	} else {
		reiserfs_log(LOG_INFO, "using writeback data mode\n");
	}

	/* TODO Not yet supported */
#if 0
	if(journal_init(sbi, jdev_name, old_format, commit_max_age)) {
		reiserfs_log(LOG_ERR, "unable to initialize journal space\n");
		goto out;
	} else {
		jinit_done = 1 ; /* once this is set, journal_release must
				    be called if we error out of the mount */
	}

	if (reread_meta_blocks(sbi)) {
		reiserfs_log(LOG_ERR,
		    "unable to reread meta blocks after journal init\n");
		goto out;
	}
#endif

	/* Define and initialize hash function */
	sbi->s_hash_function = hash_function(rmp);

	if (sbi->s_hash_function == NULL) {
		reiserfs_log(LOG_ERR, "couldn't determined hash function\n");
		error = (EINVAL);
		goto out;
	}

	if (is_reiserfs_3_5(rs) ||
	    (is_reiserfs_jr(rs) && SB_VERSION(sbi) == REISERFS_VERSION_1))
		bit_set(&(sbi->s_properties), REISERFS_3_5);
	else
		bit_set(&(sbi->s_properties), REISERFS_3_6);

	mp->mnt_data = rmp;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
#if defined(si_mountpoint)
	devvp->v_rdev->si_mountpoint = mp;
#endif

	return (0);

out:
	reiserfs_log(LOG_INFO, "*** error during mount ***\n");
	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (!SB_AP_BITMAP(sbi)[i].bp_data)
					break;
				free(SB_AP_BITMAP(sbi)[i].bp_data,
				    M_REISERFSMNT);
			}
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
		}

		if (sbi->s_rs) {
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	if (cp != NULL) {
		DROP_GIANT();
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		PICKUP_GIANT();
	}

	if (sbi)
		free(sbi, M_REISERFSMNT);
	if (rmp)
		free(rmp, M_REISERFSMNT);
	dev_rel(dev);
	return (error);
}
Example #15
/*
 * Unmount system call
 */
static int
reiserfs_unmount(struct mount *mp, int mntflags)
{
	int error, flags = 0;
	struct reiserfs_mount *rmp;
	struct reiserfs_sb_info *sbi;

	reiserfs_log(LOG_DEBUG, "get private data\n");
	rmp = VFSTOREISERFS(mp);
	sbi = rmp->rm_reiserfs;

	/* Flags handling */
	reiserfs_log(LOG_DEBUG, "handle mntflags\n");
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* Flush files -> vflush */
	reiserfs_log(LOG_DEBUG, "flush vnodes\n");
	if ((error = vflush(mp, 0, flags, curthread)))
		return (error);

	/* XXX Super block update */

	if (sbi) {
		if (SB_AP_BITMAP(sbi)) {
			int i;
			reiserfs_log(LOG_DEBUG,
			    "release bitmap buffers (total: %d)\n",
			    SB_BMAP_NR(sbi));
			for (i = 0; i < SB_BMAP_NR(sbi); i++) {
				if (SB_AP_BITMAP(sbi)[i].bp_data) {
					free(SB_AP_BITMAP(sbi)[i].bp_data,
					    M_REISERFSMNT);
					SB_AP_BITMAP(sbi)[i].bp_data = NULL;
				}
			}

			reiserfs_log(LOG_DEBUG, "free bitmaps structure\n");
			free(SB_AP_BITMAP(sbi), M_REISERFSMNT);
			SB_AP_BITMAP(sbi) = NULL;
		}

		if (sbi->s_rs) {
			reiserfs_log(LOG_DEBUG, "free super block data\n");
			free(sbi->s_rs, M_REISERFSMNT);
			sbi->s_rs = NULL;
		}
	}

	reiserfs_log(LOG_DEBUG, "close device\n");
#if defined(si_mountpoint)
	rmp->rm_devvp->v_rdev->si_mountpoint = NULL;
#endif

	DROP_GIANT();
	g_topology_lock();
	g_vfs_close(rmp->rm_cp);
	g_topology_unlock();
	PICKUP_GIANT();
	vrele(rmp->rm_devvp);
	dev_rel(rmp->rm_dev);

	if (sbi) {
		reiserfs_log(LOG_DEBUG, "free sbi\n");
		free(sbi, M_REISERFSMNT);
		sbi = rmp->rm_reiserfs = NULL;
	}
	if (rmp) {
		reiserfs_log(LOG_DEBUG, "free rmp\n");
		free(rmp, M_REISERFSMNT);
		rmp = NULL;
	}

	mp->mnt_data  = 0;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);

	reiserfs_log(LOG_DEBUG, "done\n");
	return (error);
}
Example #16
static int
_xfs_mount(struct mount		*mp)
{
	struct xfsmount		*xmp;
	struct xfs_vnode	*rootvp;
	struct ucred		*curcred;
	struct vnode		*rvp, *devvp;
	struct cdev		*ddev;
	struct g_consumer	*cp;
	struct thread		*td;
	int			error;
	
	td = curthread;
	ddev = NULL;
	cp = NULL;

	if (vfs_filteropt(mp->mnt_optnew, xfs_opts))
		return (EINVAL);

	if (mp->mnt_flag & MNT_UPDATE)
		return (0);
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EPERM);

	xmp = xfsmount_allocate(mp);
	if (xmp == NULL)
		return (ENOMEM);

	if((error = _xfs_param_copyin(mp, td)) != 0)
		goto fail;

	curcred = td->td_ucred;
	XVFS_MOUNT(XFSTOVFS(xmp), &xmp->m_args, curcred, error);
	if (error)
		goto fail;

	XVFS_ROOT(XFSTOVFS(xmp), &rootvp, error);
	ddev = XFS_VFSTOM(XFSTOVFS(xmp))->m_ddev_targp->dev;
	devvp = XFS_VFSTOM(XFSTOVFS(xmp))->m_ddev_targp->specvp;
	if (error)
		goto fail_unmount;

	if (ddev->si_iosize_max != 0)
		mp->mnt_iosize_max = ddev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(ddev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;

	if ((error = VFS_STATFS(mp, &mp->mnt_stat)) != 0)
		goto fail_unmount;

	rvp = rootvp->v_vnode;
	rvp->v_vflag |= VV_ROOT;
	VN_RELE(rootvp);

	return (0);

 fail_unmount:
	XVFS_UNMOUNT(XFSTOVFS(xmp), 0, curcred, error);

	if (devvp != NULL) {
		cp = devvp->v_bufobj.bo_private;
		if (cp != NULL) {
			DROP_GIANT();
			g_topology_lock();
			g_vfs_close(cp);
			g_topology_unlock();
			PICKUP_GIANT();
		}
	}

 fail:
	if (xmp != NULL)
		xfsmount_deallocate(xmp);

	return (error);
}
Example #17
/*
 * VFS Operations.
 *
 * mount system call
 */
static int
ext2_mount(struct mount *mp)
{
	struct vfsoptlist *opts;
	struct vnode *devvp;
	struct thread *td;
	struct ext2mount *ump = NULL;
	struct m_ext2fs *fs;
	struct nameidata nd, *ndp = &nd;
	accmode_t accmode;
	char *path, *fspec;
	int error, flags, len;

	td = curthread;
	opts = mp->mnt_optnew;

	if (vfs_filteropt(opts, ext2_opts))
		return (EINVAL);

	vfs_getopt(opts, "fspath", (void **)&path, NULL);
	/* Double-check the length of path.. */
	if (strlen(path) >= MAXMNTLEN - 1)
		return (ENAMETOOLONG);

	fspec = NULL;
	error = vfs_getopt(opts, "from", (void **)&fspec, &len);
	if (!error && fspec[len - 1] != '\0')
		return (EINVAL);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOEXT2(mp);
		fs = ump->um_e2fs; 
		error = 0;
		if (fs->e2fs_ronly == 0 &&
		    vfs_flagopt(opts, "ro", NULL, 0)) {
			error = VFS_SYNC(mp, MNT_WAIT);
			if (error)
				return (error);
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ext2_flushfiles(mp, flags, td);
			if ( error == 0 && fs->e2fs_wasvalid && ext2_cgupdate(ump, MNT_WAIT) == 0) {
				fs->e2fs->e2fs_state |= E2FS_ISCLEAN;
				ext2_sbupdate(ump, MNT_WAIT);
			}
			fs->e2fs_ronly = 1;
			vfs_flagopt(opts, "ro", &mp->mnt_flag, MNT_RDONLY);
			DROP_GIANT();
			g_topology_lock();
			g_access(ump->um_cp, 0, -1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
		}
		if (!error && (mp->mnt_flag & MNT_RELOAD))
			error = ext2_reload(mp, td);
		if (error)
			return (error);
		devvp = ump->um_devvp;
		if (fs->e2fs_ronly && !vfs_flagopt(opts, "ro", NULL, 0)) {
			if (ext2_check_sb_compat(fs->e2fs, devvp->v_rdev, 0))
				return (EPERM);

			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(devvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			if (error) {
				VOP_UNLOCK(devvp, 0);
				return (error);
			}
			VOP_UNLOCK(devvp, 0);
			DROP_GIANT();
			g_topology_lock();
			error = g_access(ump->um_cp, 0, 1, 0);
			g_topology_unlock();
			PICKUP_GIANT();
			if (error)
				return (error);

			if ((fs->e2fs->e2fs_state & E2FS_ISCLEAN) == 0 ||
			    (fs->e2fs->e2fs_state & E2FS_ERRORS)) {
				if (mp->mnt_flag & MNT_FORCE) {
					printf(
"WARNING: %s was not properly dismounted\n", fs->e2fs_fsmnt);
				} else {
					printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->e2fs_fsmnt);
					return (EPERM);
				}
			}
			fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;
			(void)ext2_cgupdate(ump, MNT_WAIT);
			fs->e2fs_ronly = 0;
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_RDONLY;
			MNT_IUNLOCK(mp);
		}
		if (vfs_flagopt(opts, "export", NULL, 0)) {
			/* Process export requests in vfs_mount.c. */
			return (error);
		}
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	if (fspec == NULL)
		return (EINVAL);
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	if ((error = namei(ndp)) != 0)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;

	if (!vn_isdisk(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * XXXRW: VOP_ACCESS() enough?
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ext2_mountfs(devvp, mp);
	} else {
		if (devvp != ump->um_devvp) {
			vput(devvp);
			return (EINVAL);	/* needs translation */
		} else
			vput(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOEXT2(mp);
	fs = ump->um_e2fs;

	/*
	 * Note that this strncpy() is ok because of a check at the start
	 * of ext2_mount().
	 */
	strncpy(fs->e2fs_fsmnt, path, MAXMNTLEN);
	fs->e2fs_fsmnt[MAXMNTLEN - 1] = '\0';
	vfs_mountedfrom(mp, fspec);
	return (0);
}
Example #18
/*ARGSUSED*/
static int
zfs_mount(vfs_t *vfsp)
{
	kthread_t	*td = curthread;
	vnode_t		*mvp = vfsp->mnt_vnodecovered;
	cred_t		*cr = td->td_ucred;
	char		*osname;
	int		error = 0;
	int		canwrite;

	if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
		return (EINVAL);

	/*
	 * If full-owner-access is enabled and delegated administration is
	 * turned on, we must set nosuid.
	 */
	if (zfs_super_owner &&
	    dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr) != ECANCELED) {
		secpolicy_fs_mount_clearopts(cr, vfsp);
	}

	/*
	 * Check for mount privilege?
	 *
	 * If we don't have privilege then see if
	 * we have local permission to allow it
	 */
	error = secpolicy_fs_mount(cr, mvp, vfsp);
	if (error) {
		error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
		if (error != 0)
			goto out;

		if (!(vfsp->vfs_flag & MS_REMOUNT)) {
			vattr_t		vattr;

			/*
			 * Make sure user is the owner of the mount point
			 * or has sufficient privileges.
			 */

			vattr.va_mask = AT_UID;

			vn_lock(mvp, LK_SHARED | LK_RETRY);
			if (error = VOP_GETATTR(mvp, &vattr, cr)) {
				VOP_UNLOCK(mvp, 0);
				goto out;
			}

#if 0 /* CHECK THIS! Is probably needed for zfs_suser. */
			if (secpolicy_vnode_owner(mvp, cr, vattr.va_uid) != 0 &&
			    VOP_ACCESS(mvp, VWRITE, cr, td) != 0) {
				error = EPERM;
				goto out;
			}
#else
			if (error = secpolicy_vnode_owner(mvp, cr, vattr.va_uid)) {
				VOP_UNLOCK(mvp, 0);
				goto out;
			}

			if (error = VOP_ACCESS(mvp, VWRITE, cr, td)) {
				VOP_UNLOCK(mvp, 0);
				goto out;
			}
			VOP_UNLOCK(mvp, 0);
#endif
		}

		secpolicy_fs_mount_clearopts(cr, vfsp);
	}

	/*
	 * Refuse to mount a filesystem if we are in a local zone and the
	 * dataset is not visible.
	 */
	if (!INGLOBALZONE(curthread) &&
	    (!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
		error = EPERM;
		goto out;
	}

	/*
	 * When doing a remount, we simply refresh our temporary properties
	 * according to those options set in the current VFS options.
	 */
	if (vfsp->vfs_flag & MS_REMOUNT) {
		/* refresh mount options */
		zfs_unregister_callbacks(vfsp->vfs_data);
		error = zfs_register_callbacks(vfsp);
		goto out;
	}

	DROP_GIANT();
	error = zfs_domount(vfsp, osname);
	PICKUP_GIANT();
out:
	return (error);
}