Example #1
0
/*
 * ctfs_mount - the VFS_MOUNT entry point
 */
static int
ctfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	ctfs_vfs_t *data;
	dev_t dev;
	gfs_dirent_t *dirent;
	int i;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	data = kmem_alloc(sizeof (ctfs_vfs_t), KM_SLEEP);

	/*
	 * Initialize vfs fields not initialized by VFS_INIT/domount
	 */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = ctfs_fstype;
	do {
		dev = makedevice(ctfs_major,
		    atomic_add_32_nv(&ctfs_minor, 1) & L_MAXMIN32);
	} while (vfs_devismounted(dev));
	vfs_make_fsid(&vfsp->vfs_fsid, dev, ctfs_fstype);
	vfsp->vfs_data = data;
	vfsp->vfs_dev = dev;

	/*
	 * Dynamically create gfs_dirent_t array for the root directory.
	 */
	dirent = kmem_zalloc((ct_ntypes + 2) * sizeof (gfs_dirent_t), KM_SLEEP);
	for (i = 0; i < ct_ntypes; i++) {
		dirent[i].gfse_name = (char *)ct_types[i]->ct_type_name;
		dirent[i].gfse_ctor = ctfs_create_tdirnode;
		dirent[i].gfse_flags = GFS_CACHE_VNODE;
	}
	dirent[i].gfse_name = "all";
	dirent[i].gfse_ctor = ctfs_create_adirnode;
	dirent[i].gfse_flags = GFS_CACHE_VNODE;
	dirent[i+1].gfse_name = NULL;

	/*
	 * Create root vnode
	 */
	data->ctvfs_root = gfs_root_create(sizeof (ctfs_rootnode_t),
	    vfsp, ctfs_ops_root, CTFS_INO_ROOT, dirent, ctfs_root_do_inode,
	    CTFS_NAME_MAX, NULL, NULL);

	kmem_free(dirent, (ct_ntypes + 2) * sizeof (gfs_dirent_t));

	return (0);
}
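The minor-number loop above is a pattern that recurs throughout these examples: bump a shared counter atomically, mask the result into the legal minor range, and retry while the candidate device is already mounted. Below is a minimal user-space sketch of that retry loop; makedevice() and vfs_devismounted() are kernel interfaces, so MINOR_MASK and device_in_use() here are hypothetical stand-ins, not the real calls.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	MINOR_MASK	0x3ffff		/* stand-in for L_MAXMIN32 */

static _Atomic uint32_t next_minor;

/* Hypothetical stand-in for vfs_devismounted(): is this minor taken? */
static bool
device_in_use(uint32_t minor)
{
	return (minor == 1 || minor == 2);	/* pretend 1 and 2 are taken */
}

/*
 * Pick an unused minor number: atomically bump the shared counter,
 * mask the new value into range, and retry while it is already taken.
 */
static uint32_t
alloc_minor(void)
{
	uint32_t m;

	do {
		m = (atomic_fetch_add(&next_minor, 1) + 1) & MINOR_MASK;
	} while (device_in_use(m));

	return (m);
}

int
main(void)
{
	printf("allocated minor %u\n", alloc_minor());
	return (0);
}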
Example #2
0
static int
zfs_create_unique_device(dev_t *dev)
{
	major_t new_major;

	do {
		ASSERT3U(zfs_minor, <=, MAXMIN32);
		minor_t start = zfs_minor;
		do {
			mutex_enter(&zfs_dev_mtx);
			if (zfs_minor >= MAXMIN32) {
				/*
				 * If we're still using the real major
				 * keep out of /dev/zfs and /dev/zvol minor
				 * number space.  If we're using a getudev()'ed
				 * major number, we can use all of its minors.
				 */
				if (zfs_major == ddi_name_to_major(ZFS_DRIVER))
					zfs_minor = ZFS_MIN_MINOR;
				else
					zfs_minor = 0;
			} else {
				zfs_minor++;
			}
			*dev = makedevice(zfs_major, zfs_minor);
			mutex_exit(&zfs_dev_mtx);
		} while (vfs_devismounted(*dev) && zfs_minor != start);
		if (zfs_minor == start) {
			/*
			 * We are using all ~262,000 minor numbers for the
			 * current major number.  Create a new major number.
			 */
			if ((new_major = getudev()) == (major_t)-1) {
				cmn_err(CE_WARN,
				    "zfs_mount: Can't get unique major "
				    "device number.");
				return (-1);
			}
			mutex_enter(&zfs_dev_mtx);
			zfs_major = new_major;
			zfs_minor = 0;

			mutex_exit(&zfs_dev_mtx);
		} else {
			break;
		}
		/* CONSTANTCONDITION */
	} while (1);

	return (0);
}
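zfs_create_unique_device() layers two searches: it walks the minor space starting just past the previous allocation, wrapping at the top of the range, and only if it arrives back at its starting minor does it ask getudev() for a brand-new major and start over. The following user-space sketch shows the same wrap-around scan with a start sentinel; in_use() and new_namespace() are hypothetical stand-ins for vfs_devismounted() and getudev().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	MINOR_MAX	7	/* tiny range so exhaustion is easy to show */

static uint32_t cur_major = 100;
static uint32_t cur_minor;

/* Hypothetical stand-in for vfs_devismounted(). */
static bool
in_use(uint32_t major, uint32_t minor)
{
	(void) minor;
	return (major == 100);	/* pretend every minor of major 100 is taken */
}

/* Hypothetical stand-in for getudev(): hand out a fresh major number. */
static uint32_t
new_namespace(void)
{
	static uint32_t next = 200;

	return (next++);
}

/*
 * Scan minors starting just past the previous allocation, wrapping at
 * MINOR_MAX.  Arriving back at the starting minor means the range is
 * exhausted, so switch to a new major and scan again.
 */
static void
alloc_unique(uint32_t *majorp, uint32_t *minorp)
{
	for (;;) {
		uint32_t start = cur_minor;

		do {
			cur_minor = (cur_minor >= MINOR_MAX) ?
			    0 : cur_minor + 1;
		} while (in_use(cur_major, cur_minor) && cur_minor != start);

		if (cur_minor != start)
			break;			/* found a free slot */

		cur_major = new_namespace();
		cur_minor = 0;
	}
	*majorp = cur_major;
	*minorp = cur_minor;
}

int
main(void)
{
	uint32_t maj, min;

	alloc_unique(&maj, &min);
	printf("dev = (%u, %u)\n", maj, min);
	return (0);
}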
Example #3
0
/*ARGSUSED*/
static int
fdmount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	struct vnode *vp;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);
	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Having the resource be anything but "fd" doesn't make sense
	 */
	vfs_setresource(vfsp, "fd");

	vp = vn_alloc(KM_SLEEP);
	vp->v_vfsp = vfsp;
	vn_setops(vp, fd_vnodeops);
	vp->v_type = VDIR;
	vp->v_data = NULL;
	vp->v_flag |= VROOT;
	vfsp->vfs_fstype = fdfstype;
	vfsp->vfs_data = (char *)vp;
	mutex_enter(&fd_minor_lock);
	do {
		fdfsmin = (fdfsmin + 1) & L_MAXMIN32;
		vfsp->vfs_dev = makedevice(fdfsmaj, fdfsmin);
	} while (vfs_devismounted(vfsp->vfs_dev));
	mutex_exit(&fd_minor_lock);
	vfs_make_fsid(&vfsp->vfs_fsid, vfsp->vfs_dev, fdfstype);
	vfsp->vfs_bsize = 1024;
	return (0);
}
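The EBUSY test that appears in most of these mount routines encodes a single rule: unless the caller asked for an overlay mount (MS_OVERLAY), the covered directory must be otherwise unreferenced (one hold) and must not be the root of its filesystem. A tiny sketch of that predicate follows, using a simplified stand-in for vnode_t; the real check in fdmount() also holds v_lock around the test.

#include <errno.h>
#include <stdio.h>

#define	MS_OVERLAY	0x80	/* illustrative flag value, not the real one */
#define	VROOT		0x01	/* simplified "root of a filesystem" flag */

/* Simplified stand-in for vnode_t; the real check also takes v_lock. */
struct fake_vnode {
	int	v_count;	/* reference count */
	int	v_flag;
};

/*
 * Return EBUSY unless the caller asked for an overlay mount or the
 * mount point is idle: exactly one hold and not a filesystem root.
 */
static int
check_mountpoint_busy(const struct fake_vnode *mvp, int mount_flags)
{
	if ((mount_flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);
	return (0);
}

int
main(void)
{
	struct fake_vnode busy = { .v_count = 3, .v_flag = 0 };
	struct fake_vnode idle = { .v_count = 1, .v_flag = 0 };

	printf("busy dir: %d, idle dir: %d\n",
	    check_mountpoint_busy(&busy, 0),
	    check_mountpoint_busy(&idle, 0));
	return (0);
}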
Example #4
0
/*
 * VFS entry points
 */
static int
objfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	objfs_vfs_t *data;
	dev_t dev;

	if (secpolicy_fs_mount(cr, mvp, vfsp) != 0)
		return (EPERM);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	if ((uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count > 1 || (mvp->v_flag & VROOT)))
		return (EBUSY);

	data = kmem_alloc(sizeof (objfs_vfs_t), KM_SLEEP);

	/*
	 * Initialize vfs fields
	 */
	vfsp->vfs_bsize = DEV_BSIZE;
	vfsp->vfs_fstype = objfs_fstype;
	do {
		dev = makedevice(objfs_major,
		    atomic_add_32_nv(&objfs_minor, 1) & L_MAXMIN32);
	} while (vfs_devismounted(dev));
	vfs_make_fsid(&vfsp->vfs_fsid, dev, objfs_fstype);
	vfsp->vfs_data = data;
	vfsp->vfs_dev = dev;

	/*
	 * Create root
	 */
	data->objfs_vfs_root = objfs_create_root(vfsp);

	return (0);
}
Example #5
0
static int
zfs_domount(struct mount *mp, dev_t mount_dev, char *osname, vfs_context_t ctx)
{
	uint64_t readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;
	struct timeval tv;

	ASSERT(mp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = mp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rw_init(&zfsvfs->z_unmount_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_unmount_inactive_lock, NULL, RW_DEFAULT, NULL);
#ifndef __APPLE__
	/* Initialize the generic filesystem structure. */
	vfsp->vfs_bcount = 0;
	vfsp->vfs_data = NULL;

	if (zfs_create_unique_device(&mount_dev) == -1) {
		error = ENODEV;
		goto out;
	}
	ASSERT(vfs_devismounted(mount_dev) == 0);
#endif

	vfs_setfsprivate(mp, zfsvfs);

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	if (readonly) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_RDONLY));
	} else {
		mode = DS_MODE_PRIMARY;
	}
	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp, (cred_t *) vfs_context_ucred(ctx)))
		goto out;

	/* The call to zfs_init_fs leaves the vnode held, release it here. */
	vnode_put(ZTOV(zp));

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t xattr;

		ASSERT(mode & DS_MODE_READONLY);
#if 0
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &xattr, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, xattr);
#endif
		zfsvfs->z_issnap = B_TRUE;
	} else {
		if (!vfs_isrdonly(mp))
			zfs_unlinked_drain(zfsvfs);

#ifndef __APPLE__
		/*
		 * Parse and replay the intent log.
		 *
		 * Because of ziltest, this must be done after
		 * zfs_unlinked_drain().  (Further note: ziltest doesn't
		 * use readonly mounts, where zfs_unlinked_drain() isn't
		 * called.)  This is because ziltest causes spa_sync()
		 * to think it's committed, but actually it is not, so
		 * the intent log contains many txg's worth of changes.
		 *
		 * In particular, if object N is in the unlinked set in
		 * the last txg to actually sync, then it could be
		 * actually freed in a later txg and then reallocated in
		 * a yet later txg.  This would write a "create object
		 * N" record to the intent log.  Normally, this would be
		 * fine because the spa_sync() would have written out
		 * the fact that object N is free, before we could write
		 * the "create object N" intent log record.
		 *
		 * But when we are in ziltest mode, we advance the "open
		 * txg" without actually spa_sync()-ing the changes to
		 * disk.  So we would see that object N is still
		 * allocated and in the unlinked set, and there is an
		 * intent log record saying to allocate it.
		 */
		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
		    zfs_replay_vector);

		if (!zil_disable)
			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
#endif
	}

#if 0
	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
#endif

	/*
	 * Record the mount time (for Spotlight)
	 */
	microtime(&tv);
	zfsvfs->z_mount_time = tv.tv_sec;

out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		mutex_destroy(&zfsvfs->z_znodes_lock);
		list_destroy(&zfsvfs->z_all_znodes);
		rw_destroy(&zfsvfs->z_unmount_lock);
		rw_destroy(&zfsvfs->z_unmount_inactive_lock);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
	} else {
		OSIncrementAtomic(&zfs_active_fs_count);
		(void) copystr(osname, vfs_statfs(mp)->f_mntfromname,
		    MNAMELEN - 1, 0);
		vfs_getnewfsid(mp);
	}

	return (error);
}
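One shape worth calling out in zfs_domount() is the single-exit cleanup: every failure jumps to the out: label, which tears down only what was actually built (the objset is closed only if it was opened, the locks and lists are destroyed, and the zfsvfs_t is freed). The self-contained sketch below shows the same shape with ordinary malloc()/fopen() resources standing in for the ZFS objects.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Generic resources; these are not the ZFS objects, just stand-ins. */
struct mount_state {
	char	*name;		/* allocated first */
	FILE	*dataset;	/* may or may not have been opened */
};

/*
 * Single-exit setup: every failure jumps to "out", which frees only
 * the pieces that were successfully created before the failure.
 */
static int
setup(struct mount_state **statep, const char *path)
{
	struct mount_state *st;
	int error = 0;

	st = calloc(1, sizeof (*st));
	if (st == NULL)
		return (ENOMEM);

	st->name = strdup(path);
	if (st->name == NULL) {
		error = ENOMEM;
		goto out;
	}

	st->dataset = fopen(path, "r");
	if (st->dataset == NULL) {
		error = errno;
		goto out;
	}

out:
	if (error) {
		/* Tear down in reverse order, skipping what never existed. */
		if (st->dataset != NULL)
			(void) fclose(st->dataset);
		free(st->name);
		free(st);
		*statep = NULL;
	} else {
		*statep = st;
	}
	return (error);
}

int
main(void)
{
	struct mount_state *st;
	int error = setup(&st, "/nonexistent/path");

	printf("setup returned %d\n", error);
	if (error == 0) {
		(void) fclose(st->dataset);
		free(st->name);
		free(st);
	}
	return (0);
}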
Example #6
0
static int
zfs_domount(vfs_t *vfsp, char *osname, cred_t *cr)
{
	dev_t mount_dev;
	uint64_t recordsize, readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;

	ASSERT(vfsp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = vfsp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rw_init(&zfsvfs->z_um_lock, NULL, RW_DEFAULT, NULL);

	/* Initialize the generic filesystem structure. */
	vfsp->vfs_bcount = 0;
	vfsp->vfs_data = NULL;

	if (zfs_create_unique_device(&mount_dev) == -1) {
		error = ENODEV;
		goto out;
	}
	ASSERT(vfs_devismounted(mount_dev) == 0);

	if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
	    NULL))
		goto out;

	vfsp->vfs_dev = mount_dev;
	vfsp->vfs_fstype = zfsfstype;
	vfsp->vfs_bsize = recordsize;
	vfsp->vfs_flag |= VFS_NOTRUNC;
	vfsp->vfs_data = zfsvfs;

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	if (readonly)
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
	else
		mode = DS_MODE_PRIMARY;

	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_PRIMARY | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp, cr))
		goto out;

	/* The call to zfs_init_fs leaves the vnode held, release it here. */
	VN_RELE(ZTOV(zp));

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		ASSERT(mode & DS_MODE_READONLY);
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		zfsvfs->z_issnap = B_TRUE;
	} else {
		error = zfs_register_callbacks(vfsp);
		if (error)
			goto out;

		/*
		 * Start a delete thread running.
		 */
		(void) zfs_delete_thread_target(zfsvfs, 1);

		/*
		 * Parse and replay the intent log.
		 */
		zil_replay(zfsvfs->z_os, zfsvfs, &zfsvfs->z_assign,
		    zfs_replay_vector, (void (*)(void *))zfs_delete_wait_empty);

		if (!zil_disable)
			zfsvfs->z_log = zil_open(zfsvfs->z_os, zfs_get_data);
	}

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		kmem_free(zfsvfs, sizeof (zfsvfs_t));
	} else {
		atomic_add_32(&zfs_active_fs_count, 1);
	}

	return (error);
}
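A second detail shared by both zfs_domount() variants is the read-only fallback: the dataset is opened in the default writable mode and, if that fails with EROFS, the open is retried with DS_MODE_READONLY so the mount can proceed read-only instead of failing. The sketch below shows the same fallback shape with a plain fopen(); it is a user-space analogy for the dmu_objset_open() retry, not ZFS code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Try to open read-write; if the object turns out to be read-only
 * (EROFS, or EACCES in this user-space analogy), retry read-only and
 * report which mode was used.
 */
static FILE *
open_rw_fallback(const char *path, bool *readonly)
{
	FILE *fp = fopen(path, "r+");

	*readonly = false;
	if (fp == NULL && (errno == EROFS || errno == EACCES)) {
		fp = fopen(path, "r");
		*readonly = (fp != NULL);
	}
	return (fp);
}

int
main(void)
{
	bool ro;
	FILE *fp = open_rw_fallback("/etc/hosts", &ro);

	if (fp != NULL) {
		printf("opened %s\n", ro ? "read-only" : "read-write");
		(void) fclose(fp);
	}
	return (0);
}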
Example #7
0
static int
VMBlockMount(struct vfs *vfsp,     // IN: file system to mount
             struct vnode *vnodep, // IN: Vnode that we are mounting on
             struct mounta *mntp,  // IN: Arguments to mount(2) from user
             struct cred *credp)   // IN: Credentials of caller
{
   VMBlockMountInfo *mip;
   int ret;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockMount: entry\n");

   /*
    * These next few checks are done by all other Solaris file systems, so
    * let's follow their lead.
    */
   ret = secpolicy_fs_mount(credp, vnodep, vfsp);
   if (ret) {
      Warning("VMBlockMount: mounting security check failed.\n");
      return ret;
   }

   if (vnodep->v_type != VDIR) {
      Warning("VMBlockMount: not mounting on a directory.\n");
      return ENOTDIR;
   }

   mutex_enter(&vnodep->v_lock);
   if ((mntp->flags & MS_OVERLAY) == 0 &&
       (vnodep->v_count != 1 || (vnodep->v_flag & VROOT))) {
      mutex_exit(&vnodep->v_lock);
      Warning("VMBlockMount: cannot allow unrequested overlay mount.\n");
      return EBUSY;
   }
   mutex_exit(&vnodep->v_lock);

   /*
    * The directory we are redirecting to is specified as the special file
    * since we have no actual device to mount on.  We store that path in the
    * mount information structure (note that there's another allocation inside
    * pn_get() so we must pn_free() that path at unmount time). KM_SLEEP
    * guarantees our memory allocation will succeed (pn_get() uses this flag
    * too).
    */
   mip = kmem_zalloc(sizeof *mip, KM_SLEEP);
   ret = pn_get(mntp->spec,
                (mntp->flags & MS_SYSSPACE) ? UIO_SYSSPACE : UIO_USERSPACE,
                &mip->redirectPath);
   if (ret) {
      Warning("VMBlockMount: could not obtain redirecting directory.\n");
      kmem_free(mip, sizeof *mip);
      return ret;
   }

   /* Do a lookup on the specified path. */
   ret = lookupname(mntp->spec,
                    (mntp->flags & MS_SYSSPACE) ? UIO_SYSSPACE : UIO_USERSPACE,
                    FOLLOW,
                    NULLVPP,
                    &mip->redirectVnode);
   if (ret) {
      Warning("VMBlockMount: could not obtain redirecting directory.\n");
      goto error_lookup;
   }

   if (mip->redirectVnode->v_type != VDIR) {
      Warning("VMBlockMount: not redirecting to a directory.\n");
      ret = ENOTDIR;
      goto error;
   }

   /*
    * Initialize our vfs structure.
    */
   vfsp->vfs_vnodecovered = vnodep;
   vfsp->vfs_flag &= ~VFS_UNMOUNTED;
   vfsp->vfs_flag |= VMBLOCK_VFS_FLAGS;
   vfsp->vfs_bsize = PAGESIZE;
   vfsp->vfs_fstype = vmblockType;
   vfsp->vfs_bcount = 0;
   /* If we had mount options, we'd call vfs_setmntopt with vfsp->vfs_mntopts */

   /* Locate a unique device minor number for this mount. */
   mutex_enter(&vmblockMutex);
   do {
      vfsp->vfs_dev = makedevice(vmblockMajor, vmblockMinor);
      vmblockMinor = (vmblockMinor + 1) & L_MAXMIN32;
   } while (vfs_devismounted(vfsp->vfs_dev));
   mutex_exit(&vmblockMutex);

   vfs_make_fsid(&vfsp->vfs_fsid, vfsp->vfs_dev, vmblockType);
   vfsp->vfs_data = (caddr_t)mip;

   /*
    * Now create the root vnode of the file system.
    */
   ret = VMBlockVnodeGet(&mip->root, mip->redirectVnode,
                         mip->redirectPath.pn_path,
                         mip->redirectPath.pn_pathlen,
                         NULL, vfsp, TRUE);
   if (ret) {
      Warning("VMBlockMount: couldn't create root vnode.\n");
      ret = EFAULT;
      goto error;
   }

   VN_HOLD(vfsp->vfs_vnodecovered);
   return 0;

error:
   /* lookupname() provides a held vnode. */
   VN_RELE(mip->redirectVnode);
error_lookup:
   pn_free(&mip->redirectPath);
   kmem_free(mip, sizeof *mip);
   return ret;
}
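VMBlockMount() uses the staged-label form of error cleanup: each successful acquisition moves the failure target one label earlier (error_lookup, then error), so the error path releases resources in exactly the reverse order they were obtained. Here is a generic sketch of that ladder; the two ordinary resources are stand-ins for the pn_get() path buffer and the held vnode from lookupname().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Acquire resource A, then resource B (ordinary stand-ins for the
 * pn_get() path buffer and the held vnode).  A failure after A jumps
 * to error_a, a failure after B jumps to error_b, and each label falls
 * through to the next so teardown runs in reverse acquisition order.
 */
static int
acquire_pair(const char *src, char **a, FILE **b)
{
	int ret;

	*a = strdup(src);			/* resource A */
	if (*a == NULL)
		return (ENOMEM);

	*b = fopen(src, "r");			/* resource B */
	if (*b == NULL) {
		ret = errno;
		goto error_a;
	}

	if ((*a)[0] != '/') {			/* a later check can still fail */
		ret = EINVAL;
		goto error_b;
	}
	return (0);

error_b:
	(void) fclose(*b);
error_a:
	free(*a);
	*a = NULL;
	*b = NULL;
	return (ret);
}

int
main(void)
{
	char *a;
	FILE *b;
	int ret = acquire_pair("/etc/hosts", &a, &b);

	printf("acquire_pair returned %d\n", ret);
	if (ret == 0) {
		(void) fclose(b);
		free(a);
	}
	return (0);
}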
Example #8
0
/*
 * Verify that the information in the configuration file regarding the
 * location for the statefile is still valid, depending on cf_type.
 * for CFT_UFS, cf_fs must still be a mounted filesystem, it must be
 *	mounted on the same device as when pmconfig was last run,
 *	and the translation of that device to a node in the prom's
 *	device tree must be the same as when pmconfig was last run.
 * for CFT_SPEC and CFT_ZVOL, cf_path must be the path to a block
 *      special file, it must have no file system mounted on it,
 *	and the translation of that device to a node in the prom's
 *	device tree must be the same as when pmconfig was last run.
 */
static int
cpr_verify_statefile_path(void)
{
	struct cprconfig *cf = &cprconfig;
	static const char long_name[] = "Statefile pathname is too long.\n";
	static const char lookup_fmt[] = "Lookup failed for "
	    "cpr statefile device %s.\n";
	static const char path_chg_fmt[] = "Device path for statefile "
	    "has changed from %s to %s.\t%s\n";
	static const char rerun[] = "Please rerun pmconfig(1m).";
	struct vfs *vfsp = NULL, *vfsp_save = rootvfs;
	ufsvfs_t *ufsvfsp = (ufsvfs_t *)rootvfs->vfs_data;
	ufsvfs_t *ufsvfsp_save = ufsvfsp;
	int error;
	struct vnode *vp;
	char *slash, *tail, *longest;
	char *errstr;
	int found = 0;
	union {
		char un_devpath[OBP_MAXPATHLEN];
		char un_sfpath[MAXNAMELEN];
	} un;
#define	devpath	un.un_devpath
#define	sfpath	un.un_sfpath

	ASSERT(cprconfig_loaded);
	/*
	 * We need not worry about locking or the timing of releasing
	 * the vnode, since we are single-threaded now.
	 */

	switch (cf->cf_type) {
	case CFT_SPEC:
		error = i_devname_to_promname(cf->cf_devfs, devpath,
		    OBP_MAXPATHLEN);
		if (error || strcmp(devpath, cf->cf_dev_prom)) {
			cpr_err(CE_CONT, path_chg_fmt,
			    cf->cf_dev_prom, devpath, rerun);
			return (error);
		}
		/*FALLTHROUGH*/
	case CFT_ZVOL:
		if (strlen(cf->cf_path) > sizeof (sfpath)) {
			cpr_err(CE_CONT, long_name);
			return (ENAMETOOLONG);
		}
		if ((error = lookupname(cf->cf_devfs,
		    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
			cpr_err(CE_CONT, lookup_fmt, cf->cf_devfs);
			return (error);
		}
		if (vp->v_type != VBLK)
			errstr = "statefile must be a block device";
		else if (vfs_devismounted(vp->v_rdev))
			errstr = "statefile device must not "
			    "have a file system mounted on it";
		else if (IS_SWAPVP(vp))
			errstr = "statefile device must not "
			    "be configured as swap file";
		else
			errstr = NULL;

		VN_RELE(vp);
		if (errstr) {
			cpr_err(CE_CONT, "%s.\n", errstr);
			return (ENOTSUP);
		}

		return (error);
	case CFT_UFS:
		break;		/* don't indent all the original code */
	default:
		cpr_err(CE_PANIC, "invalid cf_type");
	}

	/*
	 * The original code for UFS statefile
	 */
	if (strlen(cf->cf_fs) + strlen(cf->cf_path) + 2 > sizeof (sfpath)) {
		cpr_err(CE_CONT, long_name);
		return (ENAMETOOLONG);
	}

	bzero(sfpath, sizeof (sfpath));
	(void) strcpy(sfpath, cpr_cprconfig_to_path());

	if (*sfpath != '/') {
		cpr_err(CE_CONT, "Statefile pathname %s "
		    "must begin with a /\n", sfpath);
		return (EINVAL);
	}

	/*
	 * Find the longest prefix of the statefile pathname which
	 * is the mountpoint of a filesystem.  This string must
	 * match the cf_fs field we read from the config file.  Otherwise
	 * the user has changed things without running pmconfig.
	 */
	tail = longest = sfpath + 1;	/* pt beyond the leading "/" */
	while ((slash = strchr(tail, '/')) != NULL) {
		*slash = '\0';	  /* temporarily terminate the string */
		if ((error = lookupname(sfpath,
		    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
			*slash = '/';
			cpr_err(CE_CONT, "A directory in the "
			    "statefile path %s was not found.\n", sfpath);
			return (error);
		}

		vfs_list_read_lock();
		vfsp = rootvfs;
		do {
			ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
			if (ufsvfsp != NULL && ufsvfsp->vfs_root == vp) {
				found = 1;
				break;
			}
			vfsp = vfsp->vfs_next;
		} while (vfsp != rootvfs);
		vfs_list_unlock();

		/*
		 * If we have found a filesystem mounted on the current
		 * path prefix, remember the end of the string in
		 * "longest".  If it happens to be the the exact fs
		 * saved in the configuration file, save the current
		 * ufsvfsp so we can make additional checks further down.
		 */
		if (found) {
			longest = slash;
			if (strcmp(cf->cf_fs, sfpath) == 0) {
				ufsvfsp_save = ufsvfsp;
				vfsp_save = vfsp;
			}
			found = 0;
		}

		VN_RELE(vp);
		*slash = '/';
		tail = slash + 1;
	}
	*longest = '\0';
	if (cpr_is_ufs(vfsp_save) == 0 || strcmp(cf->cf_fs, sfpath)) {
		cpr_err(CE_CONT, "Filesystem containing "
		    "the statefile when pmconfig was run (%s) has "
		    "changed to %s. %s\n", cf->cf_fs, sfpath, rerun);
		return (EINVAL);
	}

	if ((error = lookupname(cf->cf_devfs,
	    UIO_SYSSPACE, FOLLOW, NULLVPP, &vp)) != 0) {
		cpr_err(CE_CONT, lookup_fmt, cf->cf_devfs);
		return (error);
	}

	if (ufsvfsp_save->vfs_devvp->v_rdev != vp->v_rdev) {
		cpr_err(CE_CONT, "Filesystem containing "
		    "statefile no longer mounted on device %s. "
		    "See power.conf(4).", cf->cf_devfs);
		VN_RELE(vp);
		return (ENXIO);
	}
	VN_RELE(vp);

	error = i_devname_to_promname(cf->cf_devfs, devpath, OBP_MAXPATHLEN);
	if (error || strcmp(devpath, cf->cf_dev_prom)) {
		cpr_err(CE_CONT, path_chg_fmt,
		    cf->cf_dev_prom, devpath, rerun);
		return (error);
	}

	return (0);
}
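The core of the UFS branch above is a longest-prefix search: terminate the pathname at each '/' in turn, test whether that prefix is the mountpoint of a filesystem, remember the deepest prefix that was, and restore the slash before scanning on. The sketch below applies the same walk to a plain string; is_mountpoint() is a hypothetical stand-in for the vfs-list scan in the kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for "is a filesystem mounted on this prefix?". */
static bool
is_mountpoint(const char *prefix)
{
	return (strcmp(prefix, "/export") == 0 ||
	    strcmp(prefix, "/export/home") == 0);
}

/*
 * Truncate path (in place) to its longest prefix that is a mount
 * point, by testing the prefix ending at every '/' from left to right.
 */
static void
longest_mounted_prefix(char *path)
{
	char *tail = path + 1;		/* step past the leading "/" */
	char *longest = path + 1;
	char *slash;

	while ((slash = strchr(tail, '/')) != NULL) {
		*slash = '\0';		/* temporarily terminate the prefix */
		if (is_mountpoint(path))
			longest = slash;
		*slash = '/';		/* restore and keep scanning */
		tail = slash + 1;
	}
	*longest = '\0';
}

int
main(void)
{
	char path[] = "/export/home/user/statefile";

	longest_mounted_prefix(path);
	printf("filesystem prefix: %s\n", path);	/* "/export/home" */
	return (0);
}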
Example #9
0
/*
 * smbfs mount vfsop
 * Set up mount info record and attach it to vfs struct.
 */
static int
smbfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	char		*data = uap->dataptr;
	int		error;
	smbnode_t 	*rtnp = NULL;	/* root of this fs */
	smbmntinfo_t 	*smi = NULL;
	dev_t 		smbfs_dev;
	int 		version;
	int 		devfd;
	zone_t		*zone = curproc->p_zone;
	zone_t		*mntzone = NULL;
	smb_share_t 	*ssp = NULL;
	smb_cred_t 	scred;
	int		flags, sec;

	STRUCT_DECL(smbfs_args, args);		/* smbfs mount arguments */

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * get arguments
	 *
	 * uap->datalen might be different from sizeof (args)
	 * in a compatible situation.
	 */
	STRUCT_INIT(args, get_udatamodel());
	bzero(STRUCT_BUF(args), SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE));
	if (copyin(data, STRUCT_BUF(args), MIN(uap->datalen,
	    SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE))))
		return (EFAULT);

	/*
	 * Check mount program version
	 */
	version = STRUCT_FGET(args, version);
	if (version != SMBFS_VERSION) {
		cmn_err(CE_WARN, "mount version mismatch:"
		    " kernel=%d, mount=%d\n",
		    SMBFS_VERSION, version);
		return (EINVAL);
	}

	/*
	 * Deal with re-mount requests.
	 */
	if (uap->flags & MS_REMOUNT) {
		cmn_err(CE_WARN, "MS_REMOUNT not implemented");
		return (ENOTSUP);
	}

	/*
	 * Check for busy
	 */
	mutex_enter(&mvp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get the "share" from the netsmb driver (ssp).
	 * It is returned with a "ref" (hold) for us.
	 * Release this hold: at errout below, or in
	 * smbfs_freevfs().
	 */
	devfd = STRUCT_FGET(args, devfd);
	error = smb_dev2share(devfd, &ssp);
	if (error) {
		cmn_err(CE_WARN, "invalid device handle %d (%d)\n",
		    devfd, error);
		return (error);
	}

	/*
	 * Use "goto errout" from here on.
	 * See: ssp, smi, rtnp, mntzone
	 */

	/*
	 * Determine the zone we're being mounted into.
	 */
	zone_hold(mntzone = zone);		/* start with this assumption */
	if (getzoneid() == GLOBAL_ZONEID) {
		zone_rele(mntzone);
		mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
		ASSERT(mntzone != NULL);
		if (mntzone != zone) {
			error = EBUSY;
			goto errout;
		}
	}

	/*
	 * Stop the mount from going any further if the zone is going away.
	 */
	if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
		error = EBUSY;
		goto errout;
	}

	/*
	 * On a Trusted Extensions client, we may have to force read-only
	 * for read-down mounts.
	 */
	if (is_system_labeled()) {
		void *addr;
		int ipvers = 0;
		struct smb_vc *vcp;

		vcp = SSTOVC(ssp);
		addr = smb_vc_getipaddr(vcp, &ipvers);
		error = smbfs_mount_label_policy(vfsp, addr, ipvers, cr);

		if (error > 0)
			goto errout;

		if (error == -1) {
			/* change mount to read-only to prevent write-down */
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
		}
	}

	/* Prevent unload. */
	atomic_inc_32(&smbfs_mountcount);

	/*
	 * Create a mount record and link it to the vfs struct.
	 * No more possibilities for errors from here on.
	 * Tear-down of this stuff is in smbfs_free_smi()
	 *
	 * Compare with NFS: nfsrootvp()
	 */
	smi = kmem_zalloc(sizeof (*smi), KM_SLEEP);

	mutex_init(&smi->smi_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&smi->smi_statvfs_cv, NULL, CV_DEFAULT, NULL);

	rw_init(&smi->smi_hash_lk, NULL, RW_DEFAULT, NULL);
	smbfs_init_hash_avl(&smi->smi_hash_avl);

	smi->smi_share = ssp;
	ssp = NULL;

	/*
	 * Convert the anonymous zone hold acquired via zone_hold() above
	 * into a zone reference.
	 */
	zone_init_ref(&smi->smi_zone_ref);
	zone_hold_ref(mntzone, &smi->smi_zone_ref, ZONE_REF_SMBFS);
	zone_rele(mntzone);
	mntzone = NULL;

	/*
	 * Initialize option defaults
	 */
	smi->smi_flags	= SMI_LLOCK;
	smi->smi_acregmin = SEC2HR(SMBFS_ACREGMIN);
	smi->smi_acregmax = SEC2HR(SMBFS_ACREGMAX);
	smi->smi_acdirmin = SEC2HR(SMBFS_ACDIRMIN);
	smi->smi_acdirmax = SEC2HR(SMBFS_ACDIRMAX);

	/*
	 * All "generic" mount options have already been
	 * handled in vfs.c:domount() - see mntopts stuff.
	 * Query generic options using vfs_optionisset().
	 */
	if (vfs_optionisset(vfsp, MNTOPT_INTR, NULL))
		smi->smi_flags |= SMI_INT;
	if (vfs_optionisset(vfsp, MNTOPT_ACL, NULL))
		smi->smi_flags |= SMI_ACL;

	/*
	 * Get the mount options that come in as smbfs_args,
	 * starting with args.flags (SMBFS_MF_xxx)
	 */
	flags = STRUCT_FGET(args, flags);
	smi->smi_uid 	= STRUCT_FGET(args, uid);
	smi->smi_gid 	= STRUCT_FGET(args, gid);
	smi->smi_fmode	= STRUCT_FGET(args, file_mode) & 0777;
	smi->smi_dmode	= STRUCT_FGET(args, dir_mode) & 0777;

	/*
	 * Handle the SMBFS_MF_xxx flags.
	 */
	if (flags & SMBFS_MF_NOAC)
		smi->smi_flags |= SMI_NOAC;
	if (flags & SMBFS_MF_ACREGMIN) {
		sec = STRUCT_FGET(args, acregmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acregmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACREGMAX) {
		sec = STRUCT_FGET(args, acregmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acregmax = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMIN) {
		sec = STRUCT_FGET(args, acdirmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acdirmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMAX) {
		sec = STRUCT_FGET(args, acdirmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acdirmax = SEC2HR(sec);
	}

	/*
	 * Get attributes of the remote file system,
	 * i.e. ACL support, named streams, etc.
	 */
	smb_credinit(&scred, cr);
	error = smbfs_smb_qfsattr(smi->smi_share, &smi->smi_fsa, &scred);
	smb_credrele(&scred);
	if (error) {
		SMBVDEBUG("smbfs_smb_qfsattr error %d\n", error);
	}

	/*
	 * We enable XATTR by default (via smbfs_mntopts)
	 * but if the share does not support named streams,
	 * force the NOXATTR option (also clears XATTR).
	 * Caller will set or clear VFS_XATTR after this.
	 */
	if ((smi->smi_fsattr & FILE_NAMED_STREAMS) == 0)
		vfs_setmntopt(vfsp, MNTOPT_NOXATTR, NULL, 0);

	/*
	 * Ditto ACLs (disable if not supported on this share)
	 */
	if ((smi->smi_fsattr & FILE_PERSISTENT_ACLS) == 0) {
		vfs_setmntopt(vfsp, MNTOPT_NOACL, NULL, 0);
		smi->smi_flags &= ~SMI_ACL;
	}

	/*
	 * Assign a unique device id to the mount
	 */
	mutex_enter(&smbfs_minor_lock);
	do {
		smbfs_minor = (smbfs_minor + 1) & MAXMIN32;
		smbfs_dev = makedevice(smbfs_major, smbfs_minor);
	} while (vfs_devismounted(smbfs_dev));
	mutex_exit(&smbfs_minor_lock);

	vfsp->vfs_dev	= smbfs_dev;
	vfs_make_fsid(&vfsp->vfs_fsid, smbfs_dev, smbfsfstyp);
	vfsp->vfs_data	= (caddr_t)smi;
	vfsp->vfs_fstype = smbfsfstyp;
	vfsp->vfs_bsize = MAXBSIZE;
	vfsp->vfs_bcount = 0;

	smi->smi_vfsp	= vfsp;
	smbfs_zonelist_add(smi);	/* undo in smbfs_freevfs */

	/*
	 * Create the root vnode, which we need in unmount
	 * for the call to smbfs_check_table(), etc.
	 * Release this hold in smbfs_unmount.
	 */
	rtnp = smbfs_node_findcreate(smi, "\\", 1, NULL, 0, 0,
	    &smbfs_fattr0);
	ASSERT(rtnp != NULL);
	rtnp->r_vnode->v_type = VDIR;
	rtnp->r_vnode->v_flag |= VROOT;
	smi->smi_root = rtnp;

	/*
	 * NFS does other stuff here too:
	 *   async worker threads
	 *   init kstats
	 *
	 * End of code from NFS nfsrootvp()
	 */
	return (0);

errout:
	vfsp->vfs_data = NULL;
	if (smi != NULL)
		smbfs_free_smi(smi);

	if (mntzone != NULL)
		zone_rele(mntzone);

	if (ssp != NULL)
		smb_share_rele(ssp);

	return (error);
}
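The SMBFS_MF_ACREGMIN/ACREGMAX/ACDIRMIN/ACDIRMAX handling above repeats one small rule four times: a user-supplied timeout that is negative or above the ceiling is clamped to the ceiling before being converted to internal units. A minimal sketch of that clamp-and-convert step follows; SEC2TICKS() and the ACMINMAX value are illustrative, not the smbfs macros.

#include <stdio.h>

#define	ACMINMAX	600			/* illustrative ceiling, seconds */
#define	SEC2TICKS(s)	((long)(s) * 100)	/* illustrative conversion */

/*
 * Clamp a user-supplied timeout into [0, max] seconds, then convert it
 * to the internal unit; out-of-range input silently becomes the max.
 */
static long
clamp_timeout(int seconds, int max)
{
	if (seconds < 0 || seconds > max)
		seconds = max;
	return (SEC2TICKS(seconds));
}

int
main(void)
{
	printf("30s -> %ld, -5s -> %ld, 9999s -> %ld\n",
	    clamp_timeout(30, ACMINMAX),
	    clamp_timeout(-5, ACMINMAX),
	    clamp_timeout(9999, ACMINMAX));
	return (0);
}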