Example No. 1
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	dp->dp_write_limit = zfs_write_limit_min;
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}
Example No. 2
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}
Example No. 3
/*
 * A reader-mostly lock implementation, layered above reader-writer locks
 * and tuned for highly parallel read acquisitions, while pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader locks only one of them for read, depending on the result of
 * a simple hash function.  That proportionally reduces lock congestion.
 * A writer, at the same time, has to sequentially acquire write on all the
 * locks.  That makes write acquisition proportionally slower, but in places
 * where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the rrw_* functions
 * above.
 */
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}
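
A minimal userland sketch of the scatter-lock idea described in the comment above, built on POSIX pthread read-write locks. The scatter_lock_t type, the scatter_* function names, the tag-based hash, and SCATTER_NUM_LOCKS are illustrative assumptions only and are not part of the ZFS rrm/rrw API:

#include <pthread.h>
#include <stdint.h>

#define	SCATTER_NUM_LOCKS	17	/* arbitrary slot count for this sketch */

typedef struct scatter_lock {
	pthread_rwlock_t sl_slots[SCATTER_NUM_LOCKS];
} scatter_lock_t;

static void
scatter_init(scatter_lock_t *sl)
{
	for (int i = 0; i < SCATTER_NUM_LOCKS; i++)
		(void) pthread_rwlock_init(&sl->sl_slots[i], NULL);
}

/*
 * A reader hashes a per-thread tag (e.g. the address of a thread-local
 * variable) to a single slot and takes only that slot for read, so
 * concurrent readers mostly land on different slots.
 */
static pthread_rwlock_t *
scatter_read_enter(scatter_lock_t *sl, const void *tag)
{
	pthread_rwlock_t *slot =
	    &sl->sl_slots[((uintptr_t)tag >> 4) % SCATTER_NUM_LOCKS];

	(void) pthread_rwlock_rdlock(slot);
	return (slot);
}

static void
scatter_read_exit(pthread_rwlock_t *slot)
{
	(void) pthread_rwlock_unlock(slot);
}

/*
 * A writer must take every slot, always in the same order, before it holds
 * the lock exclusively; this is the deliberately pessimized write path.
 */
static void
scatter_write_enter(scatter_lock_t *sl)
{
	for (int i = 0; i < SCATTER_NUM_LOCKS; i++)
		(void) pthread_rwlock_wrlock(&sl->sl_slots[i]);
}

static void
scatter_write_exit(scatter_lock_t *sl)
{
	for (int i = SCATTER_NUM_LOCKS - 1; i >= 0; i--)
		(void) pthread_rwlock_unlock(&sl->sl_slots[i]);
}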
Example No. 4
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool\n. Pool must be upgraded to mount "
		    "this file system.", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = ENOTSUP;
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example No. 5
static int
zfs_domount(vfs_t *vfsp, char *osname)
{
	uint64_t recordsize, readonly;
	int error = 0;
	int mode;
	zfsvfs_t *zfsvfs;
	znode_t *zp = NULL;

	ASSERT(vfsp);
	ASSERT(osname);

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
	zfsvfs->z_vfs = vfsp;
	zfsvfs->z_parent = zfsvfs;
	zfsvfs->z_assign = TXG_NOWAIT;
	zfsvfs->z_max_blksz = SPA_MAXBLOCKSIZE;
	zfsvfs->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;

	mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zfsvfs->z_online_recv_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zfsvfs->z_teardown_lock);
	rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
	    NULL))
		goto out;
	zfsvfs->z_vfs->vfs_bsize = recordsize;

	vfsp->vfs_data = zfsvfs;
	vfsp->mnt_flag |= MNT_LOCAL;
	vfsp->mnt_kern_flag |= MNTK_MPSAFE;
	vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
	vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;

	if (error = dsl_prop_get_integer(osname, "readonly", &readonly, NULL))
		goto out;

	mode = DS_MODE_OWNER;
	if (readonly)
		mode |= DS_MODE_READONLY;

	error = dmu_objset_open(osname, DMU_OST_ZFS, mode, &zfsvfs->z_os);
	if (error == EROFS) {
		mode = DS_MODE_OWNER | DS_MODE_READONLY;
		error = dmu_objset_open(osname, DMU_OST_ZFS, mode,
		    &zfsvfs->z_os);
	}

	if (error)
		goto out;

	if (error = zfs_init_fs(zfsvfs, &zp))
		goto out;

	/*
	 * Set features for file system.
	 */
	zfsvfs->z_use_fuids = USE_FUIDS(zfsvfs->z_version, zfsvfs->z_os);
	if (zfsvfs->z_use_fuids) {
		vfs_set_feature(vfsp, VFSFT_XVATTR);
		vfs_set_feature(vfsp, VFSFT_SYSATTR_VIEWS);
		vfs_set_feature(vfsp, VFSFT_ACEMASKONACCESS);
		vfs_set_feature(vfsp, VFSFT_ACLONCREATE);
	}
	if (zfsvfs->z_case == ZFS_CASE_INSENSITIVE) {
		vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
		vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
		vfs_set_feature(vfsp, VFSFT_NOCASESENSITIVE);
	} else if (zfsvfs->z_case == ZFS_CASE_MIXED) {
		vfs_set_feature(vfsp, VFSFT_DIRENTFLAGS);
		vfs_set_feature(vfsp, VFSFT_CASEINSENSITIVE);
	}

	if (dmu_objset_is_snapshot(zfsvfs->z_os)) {
		uint64_t pval;

		ASSERT(mode & DS_MODE_READONLY);
		atime_changed_cb(zfsvfs, B_FALSE);
		readonly_changed_cb(zfsvfs, B_TRUE);
		if (error = dsl_prop_get_integer(osname, "xattr", &pval, NULL))
			goto out;
		xattr_changed_cb(zfsvfs, pval);
		zfsvfs->z_issnap = B_TRUE;
	} else {
		error = zfsvfs_setup(zfsvfs, B_TRUE);
	}

	vfs_mountedfrom(vfsp, osname);

	if (!zfsvfs->z_issnap)
		zfsctl_create(zfsvfs);
out:
	if (error) {
		if (zfsvfs->z_os)
			dmu_objset_close(zfsvfs->z_os);
		zfs_freezfsvfs(zfsvfs);
	} else {
		atomic_add_32(&zfs_active_fs_count, 1);
	}

	return (error);
}